diff --git a/analyses/pluginALICE/ALICE_2011_S8909580.cc b/analyses/pluginALICE/ALICE_2011_S8909580.cc --- a/analyses/pluginALICE/ALICE_2011_S8909580.cc +++ b/analyses/pluginALICE/ALICE_2011_S8909580.cc @@ -1,103 +1,103 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ALICE_2011_S8909580 : public Analysis { public: ALICE_2011_S8909580() : Analysis("ALICE_2011_S8909580") {} public: void init() { - const UnstableFinalState ufs(Cuts::abseta < 15); + const UnstableParticles ufs(Cuts::abseta < 15); declare(ufs, "UFS"); _histPtK0s = bookHisto1D(1, 1, 1); _histPtLambda = bookHisto1D(2, 1, 1); _histPtAntiLambda = bookHisto1D(3, 1, 1); _histPtXi = bookHisto1D(4, 1, 1); _histPtPhi = bookHisto1D(5, 1, 1); _temp_h_Lambdas = bookHisto1D("TMP/h_Lambdas", refData(6, 1, 1)); _temp_h_Kzeros = bookHisto1D("TMP/h_Kzeros", refData(6, 1, 1)); _h_LamKzero = bookScatter2D(6, 1, 1); } void analyze(const Event& event) { const double weight = event.weight(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); foreach (const Particle& p, ufs.particles()) { const double absrap = p.absrap(); const double pT = p.pT()/GeV; if (absrap < 0.8) { switch(p.pid()) { case 3312: case -3312: if ( !( p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) { _histPtXi->fill(pT, weight); } break; if (absrap < 0.75) { case 310: _histPtK0s->fill(pT, weight); _temp_h_Kzeros->fill(pT, 2*weight); break; case 3122: if ( !( p.hasAncestor(3322) || p.hasAncestor(-3322) || p.hasAncestor(3312) || p.hasAncestor(-3312) || p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) { _histPtLambda->fill(pT, weight); _temp_h_Lambdas->fill(pT, weight); } break; case -3122: if ( !( p.hasAncestor(3322) || p.hasAncestor(-3322) || p.hasAncestor(3312) || p.hasAncestor(-3312) || p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) { 
_histPtAntiLambda->fill(pT, weight); _temp_h_Lambdas->fill(pT, weight); } break; } if (absrap<0.6) { case 333: _histPtPhi->fill(pT, weight); break; } } } } } void finalize() { scale(_histPtK0s, 1./(1.5*sumOfWeights())); scale(_histPtLambda, 1./(1.5*sumOfWeights())); scale(_histPtAntiLambda, 1./(1.5*sumOfWeights())); scale(_histPtXi, 1./(1.6*sumOfWeights())); scale(_histPtPhi, 1./(1.2*sumOfWeights())); divide(_temp_h_Lambdas, _temp_h_Kzeros, _h_LamKzero); } private: Histo1DPtr _histPtK0s, _histPtLambda, _histPtAntiLambda, _histPtXi, _histPtPhi; Histo1DPtr _temp_h_Lambdas, _temp_h_Kzeros; Scatter2DPtr _h_LamKzero; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2011_S8909580); } diff --git a/analyses/pluginALICE/ALICE_2012_I1116147.cc b/analyses/pluginALICE/ALICE_2012_I1116147.cc --- a/analyses/pluginALICE/ALICE_2012_I1116147.cc +++ b/analyses/pluginALICE/ALICE_2012_I1116147.cc @@ -1,87 +1,87 @@ //-*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ALICE_2012_I1116147 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I1116147); /// Initialise projections and histograms void init() { - const UnstableFinalState ufs(Cuts::absrap < RAPMAX); + const UnstableParticles ufs(Cuts::absrap < RAPMAX); addProjection(ufs, "UFS"); // Check if cm energy is 7 TeV or 0.9 TeV if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) _cm_energy_case = 1; else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) _cm_energy_case = 2; if (_cm_energy_case == 0) throw UserError("Center of mass energy of the given input is neither 900 nor 7000 GeV."); // Book histos if (_cm_energy_case == 1) { _h_pi0 = bookHisto1D(2,1,1); } else { _h_pi0 = bookHisto1D(1,1,1); _h_eta = bookHisto1D(3,1,1); _h_etaToPion = bookScatter2D(4,1,1); } // Temporary plots with the binning of _h_etaToPion to construct the eta/pi0 ratio _temp_h_pion = bookHisto1D("TMP/h_pion", 
refData(4,1,1)); _temp_h_eta = bookHisto1D("TMP/h_eta", refData(4,1,1)); } /// Per-event analysis void analyze(const Event& event) { const double weight = event.weight(); - const FinalState& ufs = apply(event, "UFS"); + const FinalState& ufs = apply(event, "UFS"); for (const Particle& p : ufs.particles()) { const double normfactor = TWOPI*p.pT()/GeV*2*RAPMAX; if (p.pid() == 111) { // Neutral pion; ALICE corrects for pi0 feed-down from K_0_s and Lambda if (p.hasAncestor(310) || p.hasAncestor(3122) || p.hasAncestor(-3122)) continue; //< K_0_s, Lambda, Anti-Lambda _h_pi0->fill(p.pT()/GeV, weight/normfactor); _temp_h_pion->fill(p.pT()/GeV, weight); } else if (p.pid() == 221 && _cm_energy_case == 2) { // eta meson (only for 7 TeV) _h_eta->fill(p.pT()/GeV, weight/normfactor); _temp_h_eta->fill(p.pT()/GeV, weight); } } } /// Normalize histos and construct ratio void finalize() { scale(_h_pi0, crossSection()/microbarn/sumOfWeights()); if (_cm_energy_case == 2) { divide(_temp_h_eta, _temp_h_pion, _h_etaToPion); scale(_h_eta, crossSection()/microbarn/sumOfWeights()); } } private: const double RAPMAX = 0.8; int _cm_energy_case = 0; Histo1DPtr _h_pi0, _h_eta; Histo1DPtr _temp_h_pion, _temp_h_eta; Scatter2DPtr _h_etaToPion; }; DECLARE_RIVET_PLUGIN(ALICE_2012_I1116147); } diff --git a/analyses/pluginALICE/ALICE_2014_I1300380.cc b/analyses/pluginALICE/ALICE_2014_I1300380.cc --- a/analyses/pluginALICE/ALICE_2014_I1300380.cc +++ b/analyses/pluginALICE/ALICE_2014_I1300380.cc @@ -1,120 +1,120 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ALICE_2014_I1300380 : public Analysis { public: ALICE_2014_I1300380() : Analysis("ALICE_2014_I1300380") {} public: void init() { - const UnstableFinalState cfs(Cuts::absrap<0.5); + const UnstableParticles cfs(Cuts::absrap<0.5); declare(cfs, "CFS"); // Plots from the paper _histPtSigmaStarPlus = bookHisto1D("d01-x01-y01"); // 
Sigma*+ _histPtSigmaStarMinus = bookHisto1D("d01-x01-y02"); // Sigma*- _histPtSigmaStarPlusAnti = bookHisto1D("d01-x01-y03"); // anti Sigma*- _histPtSigmaStarMinusAnti = bookHisto1D("d01-x01-y04"); // anti Sigma*+ _histPtXiStar = bookHisto1D("d02-x01-y01"); // 0.5 * (xi star + anti xi star) _histAveragePt = bookProfile1D("d03-x01-y01"); // profile } void analyze(const Event& event) { const double weight = event.weight(); - const UnstableFinalState& cfs = apply(event, "CFS"); + const UnstableParticles& cfs = apply(event, "CFS"); foreach (const Particle& p, cfs.particles()) { // protections against mc generators decaying long-lived particles if ( !(p.hasAncestor(310) || p.hasAncestor(-310) || // K0s p.hasAncestor(130) || p.hasAncestor(-130) || // K0l p.hasAncestor(3322) || p.hasAncestor(-3322) || // Xi0 p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/- p.hasAncestor(3312) || p.hasAncestor(-3312) || // Xi-/+ p.hasAncestor(3334) || p.hasAncestor(-3334) )) // Omega-/+ { int aid = abs(p.pdgId()); if (aid == 211 || // pi+ aid == 321 || // K+ aid == 313 || // K*(892)0 aid == 2212 || // proton aid == 333 ) { // phi(1020) _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); } } // end if "rejection of long-lived particles" switch (p.pdgId()) { case 3224: _histPtSigmaStarPlus->fill(p.pT()/GeV, weight); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case -3224: _histPtSigmaStarPlusAnti->fill(p.pT()/GeV, weight); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case 3114: _histPtSigmaStarMinus->fill(p.pT()/GeV, weight); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case -3114: _histPtSigmaStarMinusAnti->fill(p.pT()/GeV, weight); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case 3324: _histPtXiStar->fill(p.pT()/GeV, weight); _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case -3324: _histPtXiStar->fill(p.pT()/GeV, weight); 
_histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case 3312: _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case -3312: _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case 3334: _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; case -3334: _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight); break; } } } void finalize() { scale(_histPtSigmaStarPlus, 1./sumOfWeights()); scale(_histPtSigmaStarPlusAnti, 1./sumOfWeights()); scale(_histPtSigmaStarMinus, 1./sumOfWeights()); scale(_histPtSigmaStarMinusAnti, 1./sumOfWeights()); scale(_histPtXiStar, 1./sumOfWeights()/ 2.); } private: // plots from the paper Histo1DPtr _histPtSigmaStarPlus; Histo1DPtr _histPtSigmaStarPlusAnti; Histo1DPtr _histPtSigmaStarMinus; Histo1DPtr _histPtSigmaStarMinusAnti; Histo1DPtr _histPtXiStar; Profile1DPtr _histAveragePt; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALICE_2014_I1300380); } diff --git a/analyses/pluginALICE/ALICE_2017_I1512110.cc b/analyses/pluginALICE/ALICE_2017_I1512110.cc --- a/analyses/pluginALICE/ALICE_2017_I1512110.cc +++ b/analyses/pluginALICE/ALICE_2017_I1512110.cc @@ -1,88 +1,88 @@ //-*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ALICE_2017_I1512110 : public Analysis { public: /// Constructor ALICE_2017_I1512110() : Analysis("ALICE_2017_I1512110"), _rapmax(0.8) { } void init() { - const UnstableFinalState ufs(Cuts::absrap < _rapmax); + const UnstableParticles ufs(Cuts::absrap < _rapmax); addProjection(ufs, "UFS"); _h_pi0 = bookHisto1D(3,1,1); _h_eta = bookHisto1D(4,1,1); _h_etaToPion = bookScatter2D(5,1,1); // temporary plots with the binning of _h_etaToPion // to construct the eta/pi0 ratio in the end _temp_h_pion = bookHisto1D("TMP/h_pion",refData(5,1,1)); _temp_h_eta = bookHisto1D("TMP/h_eta",refData(5,1,1)); } void analyze(const Event& event) { const double weight = 
event.weight(); - const UnstableFinalState& ufs = applyProjection(event, "UFS"); + const UnstableParticles& ufs = applyProjection(event, "UFS"); for (const Particle& p : ufs.particles()) { if (p.pid() == 111) { // neutral pion; ALICE corrects for pi0 feed-down if ( !(p.hasAncestor(310) || p.hasAncestor(130) || // K0_s, K0_l p.hasAncestor(321) || p.hasAncestor(-321) || // K+,K- p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda, Anti-Lambda p.hasAncestor(3212) || p.hasAncestor(-3212) || // Sigma0 p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigmas p.hasAncestor(3112) || p.hasAncestor(-3112) || // Sigmas p.hasAncestor(3322) || p.hasAncestor(-3322) || // Cascades p.hasAncestor(3312) || p.hasAncestor(-3312) )) // Cascades { _h_pi0->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_pion->fill(p.pT()/GeV, weight); } } else if (p.pid() == 221){ // eta meson _h_eta->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_eta->fill(p.pT()/GeV, weight); } } } void finalize() { scale(_h_pi0, crossSection()/picobarn/sumOfWeights()); scale(_h_eta, crossSection()/picobarn/sumOfWeights()); divide(_temp_h_eta, _temp_h_pion, _h_etaToPion); } private: double _rapmax; Histo1DPtr _h_pi0; Histo1DPtr _h_eta; Histo1DPtr _temp_h_pion; Histo1DPtr _temp_h_eta; Scatter2DPtr _h_etaToPion; }; DECLARE_RIVET_PLUGIN(ALICE_2017_I1512110); } diff --git a/analyses/pluginALICE/ALICE_2017_I1620477.cc b/analyses/pluginALICE/ALICE_2017_I1620477.cc --- a/analyses/pluginALICE/ALICE_2017_I1620477.cc +++ b/analyses/pluginALICE/ALICE_2017_I1620477.cc @@ -1,90 +1,90 @@ //-*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Tools/ParticleUtils.hh" namespace Rivet { class ALICE_2017_I1620477 : public Analysis { public: /// Constructor ALICE_2017_I1620477() : Analysis("ALICE_2017_I1620477"), _rapmax(0.8) { } void init() { - const UnstableFinalState ufs(Cuts::absrap < 
_rapmax); + const UnstableParticles ufs(Cuts::absrap < _rapmax); addProjection(ufs, "UFS"); _h_pi0 = bookHisto1D(1,1,1); _h_eta = bookHisto1D(2,1,1); _h_etaToPion = bookScatter2D(8,1,1); // temporary plots with the binning of _h_etaToPion // to construct the eta/pi0 ratio in the end _temp_h_pion = bookHisto1D("TMP/h_pion",refData(8,1,1)); _temp_h_eta = bookHisto1D("TMP/h_eta",refData(8,1,1)); } void analyze(const Event& event) { const double weight = event.weight(); - const UnstableFinalState& ufs = applyProjection(event, "UFS"); + const UnstableParticles& ufs = applyProjection(event, "UFS"); for(auto p: ufs.particles()) { if (p.pid() == 111) { // neutral pion; ALICE corrects for pi0 feed-down if ( !(p.hasAncestor(310) || p.hasAncestor(130) || // K0_s, K0_l p.hasAncestor(321) || p.hasAncestor(-321) || // K+,K- p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda, Anti-Lambda p.hasAncestor(3212) || p.hasAncestor(-3212) || // Sigma0 p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigmas p.hasAncestor(3112) || p.hasAncestor(-3112) || // Sigmas p.hasAncestor(3322) || p.hasAncestor(-3322) || // Cascades p.hasAncestor(3312) || p.hasAncestor(-3312) )) // Cascades { _h_pi0->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_pion->fill(p.pT()/GeV, weight); } } else if (p.pid() == 221) { // eta meson _h_eta->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*_rapmax)); _temp_h_eta->fill(p.pT()/GeV, weight); } } } void finalize() { scale(_h_pi0, crossSection()/picobarn/sumOfWeights()); scale(_h_eta, crossSection()/picobarn/sumOfWeights()); divide(_temp_h_eta, _temp_h_pion, _h_etaToPion); } private: double _rapmax; Histo1DPtr _h_pi0; Histo1DPtr _h_eta; Histo1DPtr _temp_h_pion; Histo1DPtr _temp_h_eta; Scatter2DPtr _h_etaToPion; }; DECLARE_RIVET_PLUGIN(ALICE_2017_I1620477); } diff --git a/analyses/pluginATLAS/ATLAS_2011_I944826.cc b/analyses/pluginATLAS/ATLAS_2011_I944826.cc --- a/analyses/pluginATLAS/ATLAS_2011_I944826.cc +++ 
b/analyses/pluginATLAS/ATLAS_2011_I944826.cc @@ -1,260 +1,260 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ATLAS_2011_I944826 : public Analysis { public: /// Constructor ATLAS_2011_I944826() : Analysis("ATLAS_2011_I944826") { _sum_w_ks = 0.0; _sum_w_lambda = 0.0; _sum_w_passed = 0.0; } /// Book histograms and initialise projections before the run void init() { - UnstableFinalState ufs(Cuts::pT > 100*MeV); + UnstableParticles ufs(Cuts::pT > 100*MeV); declare(ufs, "UFS"); ChargedFinalState mbts(Cuts::absetaIn(2.09, 3.84)); declare(mbts, "MBTS"); IdentifiedFinalState nstable(Cuts::abseta < 2.5 && Cuts::pT >= 100*MeV); nstable.acceptIdPair(PID::ELECTRON) .acceptIdPair(PID::MUON) .acceptIdPair(PID::PIPLUS) .acceptIdPair(PID::KPLUS) .acceptIdPair(PID::PROTON); declare(nstable, "nstable"); if (fuzzyEquals(sqrtS()/GeV, 7000, 1e-3)) { _hist_Ks_pT = bookHisto1D(1, 1, 1); _hist_Ks_y = bookHisto1D(2, 1, 1); _hist_Ks_mult = bookHisto1D(3, 1, 1); _hist_L_pT = bookHisto1D(7, 1, 1); _hist_L_y = bookHisto1D(8, 1, 1); _hist_L_mult = bookHisto1D(9, 1, 1); _hist_Ratio_v_y = bookScatter2D(13, 1, 1); _hist_Ratio_v_pT = bookScatter2D(14, 1, 1); // _temp_lambda_v_y = Histo1D(10, 0.0, 2.5); _temp_lambdabar_v_y = Histo1D(10, 0.0, 2.5); _temp_lambda_v_pT = Histo1D(18, 0.5, 4.1); _temp_lambdabar_v_pT = Histo1D(18, 0.5, 4.1); } else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { _hist_Ks_pT = bookHisto1D(4, 1, 1); _hist_Ks_y = bookHisto1D(5, 1, 1); _hist_Ks_mult = bookHisto1D(6, 1, 1); _hist_L_pT = bookHisto1D(10, 1, 1); _hist_L_y = bookHisto1D(11, 1, 1); _hist_L_mult = bookHisto1D(12, 1, 1); _hist_Ratio_v_y = bookScatter2D(15, 1, 1); _hist_Ratio_v_pT = bookScatter2D(16, 1, 1); // _temp_lambda_v_y = Histo1D(5, 0.0, 2.5); _temp_lambdabar_v_y = Histo1D(5, 0.0, 2.5); 
_temp_lambda_v_pT = Histo1D(8, 0.5, 3.7); _temp_lambdabar_v_pT = Histo1D(8, 0.5, 3.7); } } // This function is required to impose the flight time cuts on Kaons and Lambdas double getPerpFlightDistance(const Rivet::Particle& p) { const HepMC::GenParticle* genp = p.genParticle(); const HepMC::GenVertex* prodV = genp->production_vertex(); const HepMC::GenVertex* decV = genp->end_vertex(); const HepMC::ThreeVector prodPos = prodV->point3d(); if (decV) { const HepMC::ThreeVector decPos = decV->point3d(); double dy = prodPos.y() - decPos.y(); double dx = prodPos.x() - decPos.x(); return add_quad(dx, dy); } return numeric_limits::max(); } bool daughtersSurviveCuts(const Rivet::Particle& p) { // We require the Kshort or Lambda to decay into two charged // particles with at least pT = 100 MeV inside acceptance region const HepMC::GenParticle* genp = p.genParticle(); const HepMC::GenVertex* decV = genp->end_vertex(); bool decision = true; if (!decV) return false; if (decV->particles_out_size() == 2) { std::vector pTs; std::vector charges; std::vector etas; foreach (const HepMC::GenParticle* gp, particles(decV, HepMC::children)) { pTs.push_back(gp->momentum().perp()); etas.push_back(fabs(gp->momentum().eta())); charges.push_back( Rivet::PID::threeCharge(gp->pdg_id()) ); // gp->print(); } if ( (pTs[0]/Rivet::GeV < 0.1) || (pTs[1]/Rivet::GeV < 0.1) ) { decision = false; MSG_DEBUG("Failed pT cut: " << pTs[0]/Rivet::GeV << " " << pTs[1]/Rivet::GeV); } if ( etas[0] > 2.5 || etas[1] > 2.5 ) { decision = false; MSG_DEBUG("Failed eta cut: " << etas[0] << " " << etas[1]); } if ( charges[0] * charges[1] >= 0 ) { decision = false; MSG_DEBUG("Failed opposite charge cut: " << charges[0] << " " << charges[1]); } } else { decision = false; MSG_DEBUG("Failed nDaughters cut: " << decV->particles_out_size()); } return decision; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // ATLAS MBTS trigger requirement of at least one hit in 
either hemisphere if (apply(event, "MBTS").size() < 1) { MSG_DEBUG("Failed trigger cut"); vetoEvent; } // Veto event also when we find less than 2 particles in the acceptance region of type 211,2212,11,13,321 if (apply(event, "nstable").size() < 2) { MSG_DEBUG("Failed stable particle cut"); vetoEvent; } _sum_w_passed += weight; // This ufs holds all the Kaons and Lambdas - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); // Some conters int n_KS0 = 0; int n_LAMBDA = 0; // Particle loop foreach (const Particle& p, ufs.particles()) { // General particle quantities const double pT = p.pT(); const double y = p.rapidity(); const PdgId apid = p.abspid(); double flightd = 0.0; // Look for Kaons, Lambdas switch (apid) { case PID::K0S: flightd = getPerpFlightDistance(p); if (!inRange(flightd/mm, 4., 450.) ) { MSG_DEBUG("Kaon failed flight distance cut:" << flightd); break; } if (daughtersSurviveCuts(p) ) { _hist_Ks_y ->fill(y, weight); _hist_Ks_pT->fill(pT/GeV, weight); _sum_w_ks += weight; n_KS0++; } break; case PID::LAMBDA: if (pT < 0.5*GeV) { // Lambdas have an additional pT cut of 500 MeV MSG_DEBUG("Lambda failed pT cut:" << pT/GeV << " GeV"); break; } flightd = getPerpFlightDistance(p); if (!inRange(flightd/mm, 17., 450.)) { MSG_DEBUG("Lambda failed flight distance cut:" << flightd/mm << " mm"); break; } if ( daughtersSurviveCuts(p) ) { if (p.pid() == PID::LAMBDA) { _temp_lambda_v_y.fill(fabs(y), weight); _temp_lambda_v_pT.fill(pT/GeV, weight); _hist_L_y->fill(y, weight); _hist_L_pT->fill(pT/GeV, weight); _sum_w_lambda += weight; n_LAMBDA++; } else if (p.pid() == -PID::LAMBDA) { _temp_lambdabar_v_y.fill(fabs(y), weight); _temp_lambdabar_v_pT.fill(pT/GeV, weight); } } break; } } // Fill multiplicity histos _hist_Ks_mult->fill(n_KS0, weight); _hist_L_mult->fill(n_LAMBDA, weight); } /// Normalise histograms etc., after the run void finalize() { MSG_DEBUG("# Events that pass the trigger: " << _sum_w_passed); 
MSG_DEBUG("# Kshort events: " << _sum_w_ks); MSG_DEBUG("# Lambda events: " << _sum_w_lambda); /// @todo Replace with normalize()? scale(_hist_Ks_pT, 1.0/_sum_w_ks); scale(_hist_Ks_y, 1.0/_sum_w_ks); scale(_hist_Ks_mult, 1.0/_sum_w_passed); /// @todo Replace with normalize()? scale(_hist_L_pT, 1.0/_sum_w_lambda); scale(_hist_L_y, 1.0/_sum_w_lambda); scale(_hist_L_mult, 1.0/_sum_w_passed); // Division of histograms to obtain lambda_bar/lambda ratios divide(_temp_lambdabar_v_y, _temp_lambda_v_y, _hist_Ratio_v_y); divide(_temp_lambdabar_v_pT, _temp_lambda_v_pT, _hist_Ratio_v_pT); } private: /// Counters double _sum_w_ks, _sum_w_lambda, _sum_w_passed; /// @name Persistent histograms //@{ Histo1DPtr _hist_Ks_pT, _hist_Ks_y, _hist_Ks_mult; Histo1DPtr _hist_L_pT, _hist_L_y, _hist_L_mult; Scatter2DPtr _hist_Ratio_v_pT, _hist_Ratio_v_y; //@} /// @name Temporary histograms //@{ Histo1D _temp_lambda_v_y, _temp_lambdabar_v_y; Histo1D _temp_lambda_v_pT, _temp_lambdabar_v_pT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I944826); } diff --git a/analyses/pluginATLAS/ATLAS_2011_S9035664.cc b/analyses/pluginATLAS/ATLAS_2011_S9035664.cc --- a/analyses/pluginATLAS/ATLAS_2011_S9035664.cc +++ b/analyses/pluginATLAS/ATLAS_2011_S9035664.cc @@ -1,138 +1,138 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief J/psi production at ATLAS class ATLAS_2011_S9035664: public Analysis { public: /// Constructor ATLAS_2011_S9035664() : Analysis("ATLAS_2011_S9035664") {} /// @name Analysis methods //@{ void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _nonPrRapHigh = bookHisto1D( 14, 1, 1); _nonPrRapMedHigh = bookHisto1D( 13, 1, 1); _nonPrRapMedLow = bookHisto1D( 12, 1, 1); 
_nonPrRapLow = bookHisto1D( 11, 1, 1); _PrRapHigh = bookHisto1D( 18, 1, 1); _PrRapMedHigh = bookHisto1D( 17, 1, 1); _PrRapMedLow = bookHisto1D( 16, 1, 1); _PrRapLow = bookHisto1D( 15, 1, 1); _IncRapHigh = bookHisto1D( 20, 1, 1); _IncRapMedHigh = bookHisto1D( 21, 1, 1); _IncRapMedLow = bookHisto1D( 22, 1, 1); _IncRapLow = bookHisto1D( 23, 1, 1); } void analyze(const Event& e) { // Get event weight for histo filling const double weight = e.weight(); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { if (p.abspid() != 443) continue; const GenVertex* gv = p.genParticle()->production_vertex(); bool nonPrompt = false; if (gv) { foreach (const GenParticle* pi, Rivet::particles(gv, HepMC::ancestors)) { const PdgId pid2 = pi->pdg_id(); if (PID::isHadron(pid2) && PID::hasBottom(pid2)) { nonPrompt = true; break; } } } double absrap = p.absrap(); double xp = p.perp(); if (absrap<=2.4 and absrap>2.) { if (nonPrompt) _nonPrRapHigh->fill(xp, weight); else if (!nonPrompt) _PrRapHigh->fill(xp, weight); _IncRapHigh->fill(xp, weight); } else if (absrap<=2. 
and absrap>1.5) { if (nonPrompt) _nonPrRapMedHigh->fill(xp, weight); else if (!nonPrompt) _PrRapMedHigh->fill(xp, weight); _IncRapMedHigh->fill(xp, weight); } else if (absrap<=1.5 and absrap>0.75) { if (nonPrompt) _nonPrRapMedLow->fill(xp, weight); else if (!nonPrompt) _PrRapMedLow->fill(xp, weight); _IncRapMedLow->fill(xp, weight); } else if (absrap<=0.75) { if (nonPrompt) _nonPrRapLow->fill(xp, weight); else if (!nonPrompt) _PrRapLow->fill(xp, weight); _IncRapLow->fill(xp, weight); } } } /// Finalize void finalize() { double factor = crossSection()/nanobarn*0.0593; scale(_PrRapHigh , factor/sumOfWeights()); scale(_PrRapMedHigh , factor/sumOfWeights()); scale(_PrRapMedLow , factor/sumOfWeights()); scale(_PrRapLow , factor/sumOfWeights()); scale(_nonPrRapHigh , factor/sumOfWeights()); scale(_nonPrRapMedHigh, factor/sumOfWeights()); scale(_nonPrRapMedLow , factor/sumOfWeights()); scale(_nonPrRapLow , factor/sumOfWeights()); scale(_IncRapHigh , 1000.*factor/sumOfWeights()); scale(_IncRapMedHigh , 1000.*factor/sumOfWeights()); scale(_IncRapMedLow , 1000.*factor/sumOfWeights()); scale(_IncRapLow , 1000.*factor/sumOfWeights()); } //@} private: Histo1DPtr _nonPrRapHigh; Histo1DPtr _nonPrRapMedHigh; Histo1DPtr _nonPrRapMedLow; Histo1DPtr _nonPrRapLow; Histo1DPtr _PrRapHigh; Histo1DPtr _PrRapMedHigh; Histo1DPtr _PrRapMedLow; Histo1DPtr _PrRapLow; Histo1DPtr _IncRapHigh; Histo1DPtr _IncRapMedHigh; Histo1DPtr _IncRapMedLow; Histo1DPtr _IncRapLow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_S9035664); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc @@ -1,147 +1,147 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include 
"Rivet/Projections/FastJets.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ATLAS_2012_I1082009 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ATLAS_2012_I1082009() : Analysis("ATLAS_2012_I1082009"), _weight25_30(0.),_weight30_40(0.),_weight40_50(0.), _weight50_60(0.),_weight60_70(0.),_weight25_70(0.) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Input for the jets: No neutrinos, no muons VetoedFinalState veto; veto.addVetoPairId(PID::MUON); veto.vetoNeutrinos(); FastJets jets(veto, FastJets::ANTIKT, 0.6); declare(jets, "jets"); // unstable final-state for D* - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _h_pt25_30 = bookHisto1D( 8,1,1); _h_pt30_40 = bookHisto1D( 9,1,1); _h_pt40_50 = bookHisto1D(10,1,1); _h_pt50_60 = bookHisto1D(11,1,1); _h_pt60_70 = bookHisto1D(12,1,1); _h_pt25_70 = bookHisto1D(13,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // get the jets Jets jets; foreach (const Jet& jet, apply(event, "jets").jetsByPt(25.0*GeV)) { if ( jet.abseta() < 2.5 ) jets.push_back(jet); } // get the D* mesons - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); Particles Dstar; foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); if(id==413) Dstar.push_back(p); } // loop over the jobs foreach (const Jet& jet, jets ) { double perp = jet.perp(); bool found = false; double z(0.); if(perp<25.||perp>70.) 
continue; foreach(const Particle & p, Dstar) { if(p.perp()<7.5) continue; if(deltaR(p, jet.momentum())<0.6) { Vector3 axis = jet.p3().unit(); z = axis.dot(p.p3())/jet.E(); if(z<0.3) continue; found = true; break; } } _weight25_70 += weight; if(found) _h_pt25_70->fill(z,weight); if(perp>=25.&&perp<30.) { _weight25_30 += weight; if(found) _h_pt25_30->fill(z,weight); } else if(perp>=30.&&perp<40.) { _weight30_40 += weight; if(found) _h_pt30_40->fill(z,weight); } else if(perp>=40.&&perp<50.) { _weight40_50 += weight; if(found) _h_pt40_50->fill(z,weight); } else if(perp>=50.&&perp<60.) { _weight50_60 += weight; if(found) _h_pt50_60->fill(z,weight); } else if(perp>=60.&&perp<70.) { _weight60_70 += weight; if(found) _h_pt60_70->fill(z,weight); } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_pt25_30,1./_weight25_30); scale(_h_pt30_40,1./_weight30_40); scale(_h_pt40_50,1./_weight40_50); scale(_h_pt50_60,1./_weight50_60); scale(_h_pt60_70,1./_weight60_70); scale(_h_pt25_70,1./_weight25_70); } //@} private: /// @name Histograms //@{ double _weight25_30,_weight30_40,_weight40_50; double _weight50_60,_weight60_70,_weight25_70; Histo1DPtr _h_pt25_30; Histo1DPtr _h_pt30_40; Histo1DPtr _h_pt40_50; Histo1DPtr _h_pt50_60; Histo1DPtr _h_pt60_70; Histo1DPtr _h_pt25_70; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1082009); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1093734.cc b/analyses/pluginATLAS/ATLAS_2012_I1093734.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1093734.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1093734.cc @@ -1,320 +1,320 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { namespace { inline double sumAB(vector vecX, vector vecY, vector vecW) { 
assert(vecX.size() == vecY.size() && vecX.size() == vecW.size()); double sum(0); for (size_t i = 0; i < vecX.size(); i++) sum += vecW[i] * vecX[i] * vecY[i]; return sum; } inline double sumA(vector vecX, vector vecW) { assert(vecX.size() == vecW.size()); double sum(0); for (size_t i = 0; i < vecX.size(); i++) sum += vecX[i]*vecW[i]; return sum; } inline double sumW(vector vecW) { double sum(0); for (size_t i = 0; i < vecW.size(); i++) sum += vecW[i]; return sum; } inline double mean(vector vecX, vector vecW) { return sumA(vecX, vecW) / sumW(vecW); } inline double standard_deviation(vector vecX, vector vecW) { const double x_bar = mean(vecX, vecW); double sum(0); for (size_t i = 0; i < vecX.size(); i++) { sum += vecW[i] * sqr(vecX[i] - x_bar); } return sqrt( sum / sumW(vecW) ); } inline double a0_regression(vector vecX, vector vecY, vector vecW) { const double numerator = sumA(vecY, vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumAB(vecX, vecY, vecW); const double denominator = sumW(vecW) * sumAB(vecX, vecX, vecW) - sumA(vecX, vecW) * sumA(vecX, vecW); return numerator / denominator; } inline double a1_regression(vector vecX, vector vecY, vector vecW) { const double numerator = sumW(vecW) * sumAB(vecX,vecY,vecW) - sumA(vecX, vecW) * sumA(vecY, vecW); const double denominator = sumW(vecW) * sumAB(vecX,vecX,vecW) - sumA(vecX, vecW) * sumA(vecX, vecW); return numerator/ denominator; } inline double a1_regression2(vector vecX, vector vecY, vector vecW) { const double x_bar = mean(vecX, vecW); const double y_bar = mean(vecY, vecW); double sumXY(0); for (size_t i = 0; i < vecX.size(); i++) { sumXY += vecW[i] * (vecY[i]-y_bar) * (vecX[i]-x_bar); } return sumXY / ( standard_deviation(vecX, vecW) * standard_deviation(vecY, vecW) * sumW(vecW) ); } inline double quadra_sum_residual(vector vecX, vector vecY, vector vecW) { const double a0 = a0_regression(vecX, vecY, vecW); const double a1 = a1_regression(vecX, vecY, vecW); double sum(0); for (size_t i = 0; i < 
vecX.size(); i++) { const double y_est = a0 + a1*vecX[i]; sum += vecW[i] * sqr(vecY[i] - y_est); } return sum; } inline double error_on_slope(vector vecX, vector vecY, vector vecW) { const double quadra_sum_res = quadra_sum_residual(vecX, vecY, vecW); const double sqrt_quadra_sum_x = standard_deviation(vecX, vecW) * sqrt(sumW(vecW)); return sqrt(quadra_sum_res/(sumW(vecW)-2)) / sqrt_quadra_sum_x; } } /// Forward-backward and azimuthal correlations in minimum bias events class ATLAS_2012_I1093734 : public Analysis { public: /// Constructor ATLAS_2012_I1093734() : Analysis("ATLAS_2012_I1093734") { // Stat convergence happens around 20k events, so it doesn't make sense to run this // analysis with much less than that. Given that, lets avoid some unnecessary vector // resizing by allocating sensible amounts in the first place. for (int ipt = 0; ipt < NPTBINS; ++ipt) { for (int k = 0; k < NETABINS; ++k) { _vecsNchF [ipt][k].reserve(10000); _vecsNchB [ipt][k].reserve(10000); _vecWeight[ipt][k].reserve(10000); if (ipt == 0) { _vecsSumptF[k].reserve(10000); _vecsSumptB[k].reserve(10000); } } } } public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // FB correlations part // Projections for (int ipt = 0; ipt < NPTBINS; ++ipt) { const double ptmin = PTMINVALUES[ipt]*MeV; for (int ieta = 0; ieta < NETABINS; ++ieta) { declare(ChargedFinalState(-ETAVALUES[ieta], -ETAVALUES[ieta]+0.5, ptmin), "Tracks"+ETABINNAMES[ieta]+"B"+PTBINNAMES[ipt]); declare(ChargedFinalState( ETAVALUES[ieta]-0.5, ETAVALUES[ieta], ptmin), "Tracks"+ETABINNAMES[ieta]+"F"+PTBINNAMES[ipt]); } declare(ChargedFinalState(-2.5, 2.5, ptmin), "CFS" + PTBINNAMES[ipt]); } // Histos if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) { for (int ipt = 0; ipt < NPTBINS ; ++ipt ) _s_NchCorr_vsEta[ipt] = bookScatter2D(1+ipt, 2, 1, true); for (int ieta = 0; ieta < NETABINS; ++ieta) _s_NchCorr_vsPt [ieta] = bookScatter2D(8+ieta, 2, 1, true); _s_PtsumCorr = 
bookScatter2D(13, 2, 1, true); } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) { _s_NchCorr_vsEta[0] = bookScatter2D(14, 2, 1, true); _s_PtsumCorr = bookScatter2D(15, 2, 1, true); } // Azimuthal correlations part // Projections const double ptmin = 500*MeV; declare(ChargedFinalState(-2.5, 2.5, ptmin), "ChargedTracks25"); declare(ChargedFinalState(-2.0, 2.0, ptmin), "ChargedTracks20"); declare(ChargedFinalState(-1.0, 1.0, ptmin), "ChargedTracks10"); // Histos /// @todo Declare/book as temporary for (size_t ieta = 0; ieta < 3; ++ieta) { if (fuzzyEquals(sqrtS(), 7000*GeV, 1e-3)) { _s_dphiMin[ieta] = bookScatter2D(2+2*ieta, 1, 1, true); _s_diffSO[ieta] = bookScatter2D(8+2*ieta, 1, 1, true); _th_dphi[ieta] = YODA::Histo1D(refData(2+2*ieta, 1, 1)); _th_same[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1)); _th_oppo[ieta] = YODA::Histo1D(refData(8+2*ieta, 1, 1)); } else if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) { _s_dphiMin[ieta] = bookScatter2D(1+2*ieta, 1, 1, true); _s_diffSO[ieta] = bookScatter2D(7+2*ieta, 1, 1, true); _th_dphi[ieta] = YODA::Histo1D(refData(1+2*ieta, 1, 1)); _th_same[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1)); _th_oppo[ieta] = YODA::Histo1D(refData(7+2*ieta, 1, 1)); } } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); for (int ipt = 0; ipt < NPTBINS; ++ipt) { const FinalState& charged = apply(event, "CFS" + PTBINNAMES[ipt]); if (charged.particles().size() >= 2) { for (int ieta = 0; ieta < NETABINS; ++ieta) { const string fname = "Tracks" + ETABINNAMES[ieta] + "F" + PTBINNAMES[ipt]; const string bname = "Tracks" + ETABINNAMES[ieta] + "B" + PTBINNAMES[ipt]; const ParticleVector particlesF = apply(event, fname).particles(); const ParticleVector particlesB = apply(event, bname).particles(); _vecsNchF[ipt][ieta].push_back((double) particlesF.size()); _vecsNchB[ipt][ieta].push_back((double) particlesB.size()); _vecWeight[ipt][ieta].push_back(weight); // Sum pT only for 100 MeV particles if (ipt == 
0) { double sumptF = 0; double sumptB = 0; foreach (const Particle& p, particlesF) sumptF += p.pT(); foreach (const Particle& p, particlesB) sumptB += p.pT(); _vecsSumptF[ieta].push_back(sumptF); _vecsSumptB[ieta].push_back(sumptB); } } } } string etabin[3] = { "10", "20", "25" }; for (int ieta = 0; ieta < 3; ieta++) { const string fname = "ChargedTracks" + etabin[ieta]; const ParticleVector partTrks = apply(event, fname).particlesByPt(); // Find the leading track and fill the temp histograms const Particle& plead = partTrks[0]; foreach (const Particle& p, partTrks) { if (&plead == &p) continue; ///< Don't compare the lead particle to itself const double dphi = deltaPhi(p.momentum(), plead.momentum()); _th_dphi[ieta].fill(dphi, weight); const bool sameside = (plead.eta() * p.eta() > 0); (sameside ? _th_same : _th_oppo)[ieta].fill(dphi, weight); } } } /// Finalize void finalize() { // FB part // @todo For 2D plots we will need _vecsNchF[i], _vecsNchB[j] for (int ipt = 0; ipt < NPTBINS; ++ipt) { for (int ieta = 0; ieta < NETABINS; ++ieta) { _s_NchCorr_vsEta[ipt]->point(ieta).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta])); _s_NchCorr_vsEta[ipt]->point(ieta).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta])); } // There is just one plot at 900 GeV so exit the loop here if (fuzzyEquals(sqrtS(), 900*GeV, 1e-3) && ipt == 0) break; } if (!fuzzyEquals(sqrtS(), 900*GeV, 1e-3)) { ///< No plots at 900 GeV for (int ieta = 0; ieta < NETABINS; ++ieta) { for (int ipt = 0; ipt < NPTBINS; ++ipt) { _s_NchCorr_vsPt[ieta]->point(ipt).setY(a1_regression2(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta])); _s_NchCorr_vsPt[ieta]->point(ipt).setYErr(error_on_slope(_vecsNchF[ipt][ieta], _vecsNchB[ipt][ieta], _vecWeight[ipt][ieta])); } } } // Sum pt only for 100 MeV particles for (int ieta = 0; ieta < NETABINS; ++ieta) { _s_PtsumCorr->point(ieta).setY(a1_regression2(_vecsSumptF[ieta], 
_vecsSumptB[ieta], _vecWeight[0][ieta])); _s_PtsumCorr->point(ieta).setYErr(error_on_slope(_vecsSumptF[ieta], _vecsSumptB[ieta], _vecWeight[0][ieta])); } // Azimuthal part for (int ieta = 0; ieta < 3; ieta++) { /// @note We don't just do a subtraction because of the risk of negative values and negative errors /// @todo Should the difference always be shown as positive?, i.e. y -> abs(y), etc. /// @todo Should the normalization be done _after_ the -ve value treatment? YODA::Histo1D hdiffSO = _th_same[ieta] - _th_oppo[ieta]; hdiffSO.normalize(hdiffSO.bin(0).xWidth()); for (size_t i = 0; i < hdiffSO.numBins(); ++i) { const double y = hdiffSO.bin(i).height() >= 0 ? hdiffSO.bin(i).height() : 0; const double yerr = hdiffSO.bin(i).heightErr() >= 0 ? hdiffSO.bin(i).heightErr() : 0; _s_diffSO[ieta]->point(i).setY(y, yerr); } // Extract minimal value double histMin = _th_dphi[ieta].bin(0).height(); for (size_t iphi = 1; iphi < _th_dphi[ieta].numBins(); ++iphi) { histMin = std::min(histMin, _th_dphi[ieta].bin(iphi).height()); } // Build scatter of differences double sumDiff = 0; for (size_t iphi = 0; iphi < _th_dphi[ieta].numBins(); ++iphi) { const double diff = _th_dphi[ieta].bin(iphi).height() - histMin; _s_dphiMin[ieta]->point(iphi).setY(diff, _th_dphi[ieta].bin(iphi).heightErr()); sumDiff += diff; } // Normalize _s_dphiMin[ieta]->scale(1, 1/sumDiff); } } //@} private: static const int NPTBINS = 7; static const int NETABINS = 5; static const double PTMINVALUES[NPTBINS]; static const string PTBINNAMES[NPTBINS]; static const double ETAVALUES[NETABINS]; static const string ETABINNAMES[NETABINS]; vector _vecWeight[NPTBINS][NETABINS]; vector _vecsNchF[NPTBINS][NETABINS]; vector _vecsNchB[NPTBINS][NETABINS]; vector _vecsSumptF[NETABINS]; vector _vecsSumptB[NETABINS]; /// @name Histograms //@{ Scatter2DPtr _s_NchCorr_vsEta[NPTBINS], _s_NchCorr_vsPt[NETABINS], _s_PtsumCorr; Scatter2DPtr _s_dphiMin[3], _s_diffSO[3]; YODA::Histo1D _th_dphi[3], _th_same[3], _th_oppo[3]; //@} }; /// 
@todo Initialize these inline at declaration with C++11 const double ATLAS_2012_I1093734::PTMINVALUES[] = {100, 200, 300, 500, 1000, 1500, 2000 }; const string ATLAS_2012_I1093734::PTBINNAMES[] = { "100", "200", "300", "500", "1000", "1500", "2000" }; const double ATLAS_2012_I1093734::ETAVALUES[] = {0.5, 1.0, 1.5, 2.0, 2.5}; const string ATLAS_2012_I1093734::ETABINNAMES[] = { "05", "10", "15", "20", "25" }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1093734); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1094568.cc b/analyses/pluginATLAS/ATLAS_2012_I1094568.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1094568.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1094568.cc @@ -1,366 +1,366 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/HadronicFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { struct ATLAS_2012_I1094568_Plots { // Track which veto region this is, to match the autobooked histograms int region_index; // Lower rapidity boundary of veto region double y_low; // Upper rapidity boundary of veto region double y_high; double vetoJetPt_Q0; double vetoJetPt_Qsum; // Histograms to store the veto jet pT and sum(veto jet pT) histograms.
Histo1DPtr _h_vetoJetPt_Q0; Histo1DPtr _h_vetoJetPt_Qsum; // Scatter2Ds for the gap fractions Scatter2DPtr _d_gapFraction_Q0; Scatter2DPtr _d_gapFraction_Qsum; }; /// Top pair production with central jet veto class ATLAS_2012_I1094568 : public Analysis { public: /// Constructor ATLAS_2012_I1094568() : Analysis("ATLAS_2012_I1094568") { } /// Book histograms and initialise projections before the run void init() { const FinalState fs(Cuts::abseta < 4.5); declare(fs, "ALL_FS"); /// Get electrons from truth record IdentifiedFinalState elec_fs(Cuts::abseta < 2.47 && Cuts::pT > 25*GeV); elec_fs.acceptIdPair(PID::ELECTRON); declare(elec_fs, "ELEC_FS"); /// Get muons which pass the initial kinematic cuts: IdentifiedFinalState muon_fs(Cuts::abseta < 2.5 && Cuts::pT > 20*GeV); muon_fs.acceptIdPair(PID::MUON); declare(muon_fs, "MUON_FS"); /// Get all neutrinos. These will not be used to form jets. /// We'll use the highest 2 pT neutrinos to calculate the MET IdentifiedFinalState neutrino_fs(Cuts::abseta < 4.5); neutrino_fs.acceptNeutrinos(); declare(neutrino_fs, "NEUTRINO_FS"); // Final state used as input for jet-finding. 
// We include everything except the muons and neutrinos VetoedFinalState jet_input(fs); jet_input.vetoNeutrinos(); jet_input.addVetoPairId(PID::MUON); declare(jet_input, "JET_INPUT"); // Get the jets FastJets jets(jet_input, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); // Initialise weight counter m_total_weight = 0.0; // Init histogramming for the various regions m_plots[0].region_index = 1; m_plots[0].y_low = 0.0; m_plots[0].y_high = 0.8; initializePlots(m_plots[0]); // m_plots[1].region_index = 2; m_plots[1].y_low = 0.8; m_plots[1].y_high = 1.5; initializePlots(m_plots[1]); // m_plots[2].region_index = 3; m_plots[2].y_low = 1.5; m_plots[2].y_high = 2.1; initializePlots(m_plots[2]); // m_plots[3].region_index = 4; m_plots[3].y_low = 0.0; m_plots[3].y_high = 2.1; initializePlots(m_plots[3]); } void initializePlots(ATLAS_2012_I1094568_Plots& plots) { const string vetoPt_Q0_name = "TMP/vetoJetPt_Q0_" + to_str(plots.region_index); plots.vetoJetPt_Q0 = 0.0; plots._h_vetoJetPt_Q0 = bookHisto1D(vetoPt_Q0_name, 200, 0.0, 1000.0); plots._d_gapFraction_Q0 = bookScatter2D(plots.region_index, 1, 1); foreach (Point2D p, refData(plots.region_index, 1, 1).points()) { p.setY(0, 0); plots._d_gapFraction_Q0->addPoint(p); } const string vetoPt_Qsum_name = "TMP/vetoJetPt_Qsum_" + to_str(plots.region_index); plots._h_vetoJetPt_Qsum = bookHisto1D(vetoPt_Qsum_name, 200, 0.0, 1000.0); plots._d_gapFraction_Qsum = bookScatter2D(plots.region_index, 2, 1); plots.vetoJetPt_Qsum = 0.0; foreach (Point2D p, refData(plots.region_index, 2, 1).points()) { p.setY(0, 0); plots._d_gapFraction_Qsum->addPoint(p); } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); /// Get the various sets of final state particles const Particles& elecFS = apply(event, "ELEC_FS").particlesByPt(); const Particles& muonFS = apply(event, "MUON_FS").particlesByPt(); const Particles& neutrinoFS = apply(event, "NEUTRINO_FS").particlesByPt(); // Get all jets with pT 
> 25 GeV const Jets& jets = apply(event, "JETS").jetsByPt(25.0*GeV); // Keep any jets that pass the initial rapidity cut vector central_jets; foreach(const Jet& j, jets) { if (j.absrap() < 2.4) central_jets.push_back(&j); } // For each of the jets that pass the rapidity cut, only keep those that are not // too close to any leptons vector good_jets; foreach(const Jet* j, central_jets) { bool goodJet = true; foreach (const Particle& e, elecFS) { double elec_jet_dR = deltaR(e.momentum(), j->momentum()); if (elec_jet_dR < 0.4) { goodJet = false; break; } } if (!goodJet) continue; if (!goodJet) continue; foreach (const Particle& m, muonFS) { double muon_jet_dR = deltaR(m.momentum(), j->momentum()); if (muon_jet_dR < 0.4) { goodJet = false; break; } } if (!goodJet) continue; good_jets.push_back(j); } // Get b hadrons with pT > 5 GeV - /// @todo This is a hack -- replace with UnstableFinalState + /// @todo This is a hack -- replace with UnstableParticles vector B_hadrons; vector allParticles = particles(event.genEvent()); for (size_t i = 0; i < allParticles.size(); i++) { const GenParticle* p = allParticles[i]; if (!PID::isHadron(p->pdg_id()) || !PID::hasBottom(p->pdg_id())) continue; if (p->momentum().perp() < 5*GeV) continue; B_hadrons.push_back(p); } // For each of the good jets, check whether any are b-jets (via dR matching) vector b_jets; foreach (const Jet* j, good_jets) { bool isbJet = false; foreach (const GenParticle* b, B_hadrons) { if (deltaR(j->momentum(), FourMomentum(b->momentum())) < 0.3) isbJet = true; } if (isbJet) b_jets.push_back(j); } // Check the good jets again and keep track of the "additional jets" // i.e. 
those which are not either of the 2 highest pT b-jets vector veto_jets; int n_bjets_matched = 0; foreach (const Jet* j, good_jets) { bool isBJet = false; foreach (const Jet* b, b_jets) { if (n_bjets_matched == 2) break; if (b == j){isBJet = true; ++ n_bjets_matched;} } if (!isBJet) veto_jets.push_back(j); } // Get the MET by taking the vector sum of all neutrinos /// @todo Use MissingMomentum instead? double MET = 0; FourMomentum p_MET; foreach (const Particle& p, neutrinoFS) { p_MET = p_MET + p.momentum(); } MET = p_MET.pT(); // Now we have everything we need to start doing the event selections bool passed_ee = false; vector vetoJets_ee; // We want exactly 2 electrons... if (elecFS.size() == 2) { // ... with opposite sign charges. if (charge(elecFS[0]) != charge(elecFS[1])) { // Check the MET if (MET >= 40*GeV) { // Do some dilepton mass cuts const double dilepton_mass = (elecFS[0].momentum() + elecFS[1].momentum()).mass(); if (dilepton_mass >= 15*GeV) { if (fabs(dilepton_mass - 91.0*GeV) >= 10.0*GeV) { // We need at least 2 b-jets if (b_jets.size() > 1) { // This event has passed all the cuts; passed_ee = true; } } } } } } bool passed_mumu = false; // Now do the same checks for the mumu channel vector vetoJets_mumu; // So we now want 2 good muons... if (muonFS.size() == 2) { // ...with opposite sign charges. 
if (charge(muonFS[0]) != charge(muonFS[1])) { // Check the MET if (MET >= 40*GeV) { // and do some di-muon mass cuts const double dilepton_mass = (muonFS.at(0).momentum() + muonFS.at(1).momentum()).mass(); if (dilepton_mass >= 15*GeV) { if (fabs(dilepton_mass - 91.0*GeV) >= 10.0*GeV) { // Need at least 2 b-jets if (b_jets.size() > 1) { // This event has passed all mumu-channel cuts passed_mumu = true; } } } } } } bool passed_emu = false; // Finally, the same again with the emu channel vector vetoJets_emu; // We want exactly 1 electron and 1 muon if (elecFS.size() == 1 && muonFS.size() == 1) { // With opposite sign charges if (charge(elecFS[0]) != charge(muonFS[0])) { // Calculate HT: scalar sum of the pTs of the leptons and all good jets double HT = 0; HT += elecFS[0].pT(); HT += muonFS[0].pT(); foreach (const Jet* j, good_jets) HT += fabs(j->pT()); // Keep events with HT > 130 GeV if (HT > 130.0*GeV) { // And again we want 2 or more b-jets if (b_jets.size() > 1) { passed_emu = true; } } } } if (passed_ee == true || passed_mumu == true || passed_emu == true) { // If the event passes the selection, we use it for all gap fractions m_total_weight += weight; // Loop over each veto jet foreach (const Jet* j, veto_jets) { const double pt = j->pT(); const double rapidity = fabs(j->rapidity()); // Loop over each region for (size_t i = 0; i < 4; ++i) { // If the jet falls into this region, get its pT and increment sum(pT) if (inRange(rapidity, m_plots[i].y_low, m_plots[i].y_high)) { m_plots[i].vetoJetPt_Qsum += pt; // If we've already got a veto jet, don't replace it if (m_plots[i].vetoJetPt_Q0 == 0.0) m_plots[i].vetoJetPt_Q0 = pt; } } } for (size_t i = 0; i < 4; ++i) { m_plots[i]._h_vetoJetPt_Q0->fill(m_plots[i].vetoJetPt_Q0, weight); m_plots[i]._h_vetoJetPt_Qsum->fill(m_plots[i].vetoJetPt_Qsum, weight); m_plots[i].vetoJetPt_Q0 = 0.0; m_plots[i].vetoJetPt_Qsum = 0.0; } } } /// Normalise histograms etc., after the run void finalize() { for (size_t i = 0; i < 4; ++i) { 
finalizeGapFraction(m_total_weight, m_plots[i]._d_gapFraction_Q0, m_plots[i]._h_vetoJetPt_Q0); finalizeGapFraction(m_total_weight, m_plots[i]._d_gapFraction_Qsum, m_plots[i]._h_vetoJetPt_Qsum); } } /// Convert temporary histos to cumulative efficiency scatters /// @todo Should be possible to replace this with a couple of YODA one-lines for diff -> integral and "efficiency division" void finalizeGapFraction(double total_weight, Scatter2DPtr gapFraction, Histo1DPtr vetoPt) { // Stores the cumulative frequency of the veto jet pT histogram double vetoPtWeightSum = 0.0; // Keep track of which gap fraction point we're currently populating (#final_points != #tmp_bins) size_t fgap_point = 0; for (size_t i = 0; i < vetoPt->numBins(); ++i) { // If we've done the last "final" point, stop if (fgap_point == gapFraction->numPoints()) break; // Increment the cumulative vetoPt counter for this temp histo bin /// @todo Get rid of this and use vetoPt->integral(i+1) when points and bins line up? vetoPtWeightSum += vetoPt->bin(i).sumW(); // If this temp histo bin's upper edge doesn't correspond to the reference point, don't finalise the scatter. // Note that points are ON the bin edges and have no width: they represent the integral up to exactly that point. if ( !fuzzyEquals(vetoPt->bin(i).xMax(), gapFraction->point(fgap_point).x()) ) continue; // Calculate the gap fraction and its uncertainty const double frac = (total_weight != 0.0) ? vetoPtWeightSum/total_weight : 0; const double fracErr = (total_weight != 0.0) ? 
sqrt(frac*(1-frac)/total_weight) : 0; gapFraction->point(fgap_point).setY(frac, fracErr); ++fgap_point; } } private: // Weight counter double m_total_weight; // Structs containing all the plots, for each event selection ATLAS_2012_I1094568_Plots m_plots[4]; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1094568); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc @@ -1,373 +1,373 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + second.momentum(); } operator FourMomentum() const { return mom(); } static bool cmppT(const Zstate& lx, const Zstate& rx) { return lx.mom().pT() < rx.mom().pT(); } }; /// @name ZZ analysis class ATLAS_2012_I1203852 : public Analysis { public: /// Default constructor ATLAS_2012_I1203852() : Analysis("ATLAS_2012_I1203852") { } void init() { // NB Missing ET is not required to be neutrinos FinalState fs(-5.0, 5.0, 0.0*GeV); // Final states to form Z bosons vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON)); vids.push_back(make_pair(PID::MUON, PID::ANTIMUON)); IdentifiedFinalState 
Photon(fs); Photon.acceptIdPair(PID::PHOTON); IdentifiedFinalState bare_EL(fs); bare_EL.acceptIdPair(PID::ELECTRON); IdentifiedFinalState bare_MU(fs); bare_MU.acceptIdPair(PID::MUON); // Selection 1: ZZ-> llll selection Cut etaranges_lep = Cuts::abseta < 3.16 && Cuts::pT > 7*GeV; DressedLeptons electron_sel4l(Photon, bare_EL, 0.1, etaranges_lep); declare(electron_sel4l, "ELECTRON_sel4l"); DressedLeptons muon_sel4l(Photon, bare_MU, 0.1, etaranges_lep); declare(muon_sel4l, "MUON_sel4l"); // Selection 2: ZZ-> llnunu selection Cut etaranges_lep2 = Cuts::abseta < 2.5 && Cuts::pT > 10*GeV; DressedLeptons electron_sel2l2nu(Photon, bare_EL, 0.1, etaranges_lep2); declare(electron_sel2l2nu, "ELECTRON_sel2l2nu"); DressedLeptons muon_sel2l2nu(Photon, bare_MU, 0.1, etaranges_lep2); declare(muon_sel2l2nu, "MUON_sel2l2nu"); /// Get all neutrinos. These will not be used to form jets. IdentifiedFinalState neutrino_fs(Cuts::abseta < 4.5); neutrino_fs.acceptNeutrinos(); declare(neutrino_fs, "NEUTRINO_FS"); // Calculate missing ET from the visible final state, not by requiring neutrinos addProjection(MissingMomentum(Cuts::abseta < 4.5), "MISSING"); VetoedFinalState jetinput; jetinput.addVetoOnThisFinalState(bare_MU); jetinput.addVetoOnThisFinalState(neutrino_fs); FastJets jetpro(fs, FastJets::ANTIKT, 0.4); declare(jetpro, "jet"); // Both ZZ on-shell histos _h_ZZ_xsect = bookHisto1D(1, 1, 1); _h_ZZ_ZpT = bookHisto1D(3, 1, 1); _h_ZZ_phill = bookHisto1D(5, 1, 1); _h_ZZ_mZZ = bookHisto1D(7, 1, 1); // One Z off-shell (ZZstar) histos _h_ZZs_xsect = bookHisto1D(1, 1, 2); // ZZ -> llnunu histos _h_ZZnunu_xsect = bookHisto1D(1, 1, 3); _h_ZZnunu_ZpT = bookHisto1D(4, 1, 1); _h_ZZnunu_phill = bookHisto1D(6, 1, 1); _h_ZZnunu_mZZ = bookHisto1D(8, 1, 1); } /// Do the analysis void analyze(const Event& e) { const double weight = e.weight(); //////////////////////////////////////////////////////////////////// // preselection of leptons for ZZ-> llll final state 
//////////////////////////////////////////////////////////////////// Particles leptons_sel4l; const vector& mu_sel4l = apply(e, "MUON_sel4l").dressedLeptons(); const vector& el_sel4l = apply(e, "ELECTRON_sel4l").dressedLeptons(); vector leptonsFS_sel4l; leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() ); leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() ); //////////////////////////////////////////////////////////////////// // OVERLAP removal dR(l,l)>0.2 //////////////////////////////////////////////////////////////////// foreach ( const DressedLepton& l1, leptonsFS_sel4l) { bool isolated = true; foreach (DressedLepton& l2, leptonsFS_sel4l) { const double dR = deltaR(l1, l2); if (dR < 0.2 && l1 != l2) { isolated = false; break; } } if (isolated) leptons_sel4l.push_back(l1); } ////////////////////////////////////////////////////////////////// // Exactly two opposite charged leptons ////////////////////////////////////////////////////////////////// // calculate total 'flavour' charge double totalcharge = 0; foreach (Particle& l, leptons_sel4l) totalcharge += l.pid(); // Analyze 4 lepton events if (leptons_sel4l.size() == 4 && totalcharge == 0 ) { Zstate Z1, Z2; // Identifies Z states from 4 lepton pairs identifyZstates(Z1, Z2,leptons_sel4l); //////////////////////////////////////////////////////////////////////////// // Z MASS WINDOW // -ZZ: for both Z: 6620 GeV /////////////////////////////////////////////////////////////////////////// Zstate leadPtZ = std::max(Z1, Z2, Zstate::cmppT); double mZ1 = Z1.mom().mass(); double mZ2 = Z2.mom().mass(); double ZpT = leadPtZ.mom().pT(); double phill = fabs(deltaPhi(leadPtZ.first, leadPtZ.second)); if (phill > M_PI) phill = 2*M_PI-phill; double mZZ = (Z1.mom() + Z2.mom()).mass(); if (mZ1 > 20*GeV && mZ2 > 20*GeV) { // ZZ* selection if (inRange(mZ1, 66*GeV, 116*GeV) || inRange(mZ2, 66*GeV, 116*GeV)) { _h_ZZs_xsect -> fill(sqrtS()*GeV, weight); } // ZZ selection if 
(inRange(mZ1, 66*GeV, 116*GeV) && inRange(mZ2, 66*GeV, 116*GeV)) { _h_ZZ_xsect -> fill(sqrtS()*GeV, weight); _h_ZZ_ZpT -> fill(ZpT , weight); _h_ZZ_phill -> fill(phill , weight); _h_ZZ_mZZ -> fill(mZZ , weight); } } } //////////////////////////////////////////////////////////////////// /// preselection of leptons for ZZ-> llnunu final state //////////////////////////////////////////////////////////////////// Particles leptons_sel2l2nu; // output const vector& mu_sel2l2nu = apply(e, "MUON_sel2l2nu").dressedLeptons(); const vector& el_sel2l2nu = apply(e, "ELECTRON_sel2l2nu").dressedLeptons(); vector leptonsFS_sel2l2nu; leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), mu_sel2l2nu.begin(), mu_sel2l2nu.end() ); leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), el_sel2l2nu.begin(), el_sel2l2nu.end() ); // Lepton preselection for ZZ-> llnunu if ((mu_sel2l2nu.empty() || el_sel2l2nu.empty()) // cannot have opposite flavour && (leptonsFS_sel2l2nu.size() == 2) // exactly two leptons && (leptonsFS_sel2l2nu[0].charge() * leptonsFS_sel2l2nu[1].charge() < 1 ) // opposite charge && (deltaR(leptonsFS_sel2l2nu[0], leptonsFS_sel2l2nu[1]) > 0.3) // overlap removal && (leptonsFS_sel2l2nu[0].pT() > 20*GeV && leptonsFS_sel2l2nu[1].pT() > 20*GeV)) { // trigger requirement leptons_sel2l2nu.insert(leptons_sel2l2nu.end(), leptonsFS_sel2l2nu.begin(), leptonsFS_sel2l2nu.end()); } if (leptons_sel2l2nu.empty()) vetoEvent; // no further analysis, fine to veto Particles leptons_sel2l2nu_jetveto; foreach (const DressedLepton& l, mu_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton()); foreach (const DressedLepton& l, el_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton()); double ptll = (leptons_sel2l2nu[0].momentum() + leptons_sel2l2nu[1].momentum()).pT(); // Find Z1-> ll FinalState fs2(-3.2, 3.2); InvMassFinalState imfs(fs2, vids, 20*GeV, sqrtS()); imfs.calc(leptons_sel2l2nu); if (imfs.particlePairs().size() != 1) vetoEvent; const ParticlePair& Z1constituents 
= imfs.particlePairs()[0]; FourMomentum Z1 = Z1constituents.first.momentum() + Z1constituents.second.momentum(); // Z to neutrinos candidate from missing ET const MissingMomentum & missmom = applyProjection(e, "MISSING"); const FourMomentum Z2 = missmom.missingMomentum(ZMASS); double met_Znunu = missmom.missingEt(); //Z2.pT(); // mTZZ const double mT2_1st_term = add_quad(ZMASS, ptll) + add_quad(ZMASS, met_Znunu); const double mT2_2nd_term = Z1.px() + Z2.px(); const double mT2_3rd_term = Z1.py() + Z2.py(); const double mTZZ = sqrt(sqr(mT2_1st_term) - sqr(mT2_2nd_term) - sqr(mT2_3rd_term)); if (!inRange(Z2.mass(), 66*GeV, 116*GeV)) vetoEvent; if (!inRange(Z1.mass(), 76*GeV, 106*GeV)) vetoEvent; ///////////////////////////////////////////////////////////// // AXIAL MET < 75 GeV //////////////////////////////////////////////////////////// double dPhiZ1Z2 = fabs(deltaPhi(Z1, Z2)); if (dPhiZ1Z2 > M_PI) dPhiZ1Z2 = 2*M_PI - dPhiZ1Z2; const double axialEtmiss = -Z2.pT()*cos(dPhiZ1Z2); if (axialEtmiss < 75*GeV) vetoEvent; const double ZpT = Z1.pT(); double phill = fabs(deltaPhi(Z1constituents.first, Z1constituents.second)); if (phill > M_PI) phill = 2*M_PI - phill; //////////////////////////////////////////////////////////////////////////// // JETS // -"j": found by "jetpro" projection && pT() > 25 GeV && |eta| < 4.5 // -"goodjets": "j" && dR(electron/muon,jet) > 0.3 // // JETVETO: veto all events with at least one good jet /////////////////////////////////////////////////////////////////////////// vector good_jets; foreach (const Jet& j, apply(e, "jet").jetsByPt(25)) { if (j.abseta() > 4.5) continue; bool isLepton = 0; foreach (const Particle& l, leptons_sel2l2nu_jetveto) { const double dR = deltaR(l.momentum(), j.momentum()); if (dR < 0.3) { isLepton = true; break; } } if (!isLepton) good_jets.push_back(j); } size_t n_sel_jets = good_jets.size(); if (n_sel_jets != 0) vetoEvent; ///////////////////////////////////////////////////////////// // Fractional MET and lepton pair 
difference: "RatioMet"< 0.4 //////////////////////////////////////////////////////////// double ratioMet = fabs(Z2.pT() - Z1.pT()) / Z1.pT(); if (ratioMet > 0.4 ) vetoEvent; // End of ZZllnunu selection: now fill histograms _h_ZZnunu_xsect->fill(sqrtS()/GeV, weight); _h_ZZnunu_ZpT ->fill(ZpT, weight); _h_ZZnunu_phill->fill(phill, weight); _h_ZZnunu_mZZ ->fill(mTZZ, weight); } /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn; scale(_h_ZZ_xsect, norm); normalize(_h_ZZ_ZpT); normalize(_h_ZZ_phill); normalize(_h_ZZ_mZZ); scale(_h_ZZs_xsect, norm); scale(_h_ZZnunu_xsect, norm); normalize(_h_ZZnunu_ZpT); normalize(_h_ZZnunu_phill); normalize(_h_ZZnunu_mZZ); } private: void identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l); Histo1DPtr _h_ZZ_xsect, _h_ZZ_ZpT, _h_ZZ_phill, _h_ZZ_mZZ; Histo1DPtr _h_ZZs_xsect; Histo1DPtr _h_ZZnunu_xsect, _h_ZZnunu_ZpT, _h_ZZnunu_phill, _h_ZZnunu_mZZ; vector< pair > vids; const double ZMASS = 91.1876; // GeV }; /// 4l to ZZ assignment -- algorithm void ATLAS_2012_I1203852::identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l) { ///////////////////////////////////////////////////////////////////////////// /// ZZ->4l pairing /// - Exactly two same flavour opposite charged leptons /// - Ambiguities in pairing are resolved by choosing the combination /// that results in the smaller value of the sum |mll - mZ| for the two pairs ///////////////////////////////////////////////////////////////////////////// Particles part_pos_el, part_neg_el, part_pos_mu, part_neg_mu; foreach (const Particle& l , leptons_sel4l) { if (l.abspid() == PID::ELECTRON) { if (l.pid() < 0) part_neg_el.push_back(l); if (l.pid() > 0) part_pos_el.push_back(l); } else if (l.abspid() == PID::MUON) { if (l.pid() < 0) part_neg_mu.push_back(l); if (l.pid() > 0) part_pos_mu.push_back(l); } } // ee/mm channel if ( part_neg_el.size() == 2 || part_neg_mu.size() == 2) { Zstate Zcand_1, Zcand_2, Zcand_3, 
Zcand_4; if (part_neg_el.size() == 2) { // ee Zcand_1 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) ); Zcand_2 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[1] ) ); Zcand_3 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[0] ) ); Zcand_4 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[1] ) ); } else { // mumu Zcand_1 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) ); Zcand_2 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[1] ) ); Zcand_3 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[0] ) ); Zcand_4 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[1] ) ); } // We can have the following pairs: (Z1 + Z4) || (Z2 + Z3) double minValue_1, minValue_2; minValue_1 = fabs( Zcand_1.mom().mass() - ZMASS ) + fabs( Zcand_4.mom().mass() - ZMASS); minValue_2 = fabs( Zcand_2.mom().mass() - ZMASS ) + fabs( Zcand_3.mom().mass() - ZMASS); if (minValue_1 < minValue_2 ) { Z1 = Zcand_1; Z2 = Zcand_4; } else { Z1 = Zcand_2; Z2 = Zcand_3; } // emu channel } else if (part_neg_mu.size() == 1 && part_neg_el.size() == 1) { Z1 = Zstate ( ParticlePair (part_neg_mu[0], part_pos_mu[0] ) ); Z2 = Zstate ( ParticlePair (part_neg_el[0], part_pos_el[0] ) ); } } // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1203852); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc @@ -1,1060 +1,1060 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2012_I1204447 : public Analysis { public: 
/// Constructor ATLAS_2012_I1204447() : Analysis("ATLAS_2012_I1204447") { } /// Book histograms and initialise projections before the run void init() { // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off _use_fiducial_lepton_efficiency = true; // Random numbers for simulation of ATLAS detector reconstruction efficiency srand(160385); // Read in all signal regions _signal_regions = getSignalRegions(); // Set number of events per signal region to 0 for (size_t i = 0; i < _signal_regions.size(); i++) _eventCountsPerSR[_signal_regions[i]] = 0.0; // Final state including all charged and neutral particles const FinalState fs(-5.0, 5.0, 1*GeV); declare(fs, "FS"); // Final state including all charged particles declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 1*GeV), "CFS"); // Final state including all visible particles (to calculate MET, Jets etc.) declare(VisibleFinalState(Cuts::abseta < 5.0), "VFS"); // Final state including all AntiKt 04 Jets VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // Final state including all unstable particles (including taus) - declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV), "UFS"); + declare(UnstableParticles(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV), "UFS"); // Final state including all electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Final state including all muons IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Book histograms _h_HTlep_all = bookHisto1D("HTlep_all" , 30, 0, 1500); _h_HTjets_all = bookHisto1D("HTjets_all", 30, 0, 1500); _h_MET_all = bookHisto1D("MET_all" , 20, 0, 1000); _h_Meff_all = bookHisto1D("Meff_all" , 30, 0, 3000); _h_e_n = bookHisto1D("e_n" , 10, -0.5, 9.5); _h_mu_n = bookHisto1D("mu_n" 
, 10, -0.5, 9.5); _h_tau_n = bookHisto1D("tau_n", 10, -0.5, 9.5); _h_pt_1_3l = bookHisto1D("pt_1_3l", 100, 0, 2000); _h_pt_2_3l = bookHisto1D("pt_2_3l", 100, 0, 2000); _h_pt_3_3l = bookHisto1D("pt_3_3l", 100, 0, 2000); _h_pt_1_2ltau = bookHisto1D("pt_1_2ltau", 100, 0, 2000); _h_pt_2_2ltau = bookHisto1D("pt_2_2ltau", 100, 0, 2000); _h_pt_3_2ltau = bookHisto1D("pt_3_2ltau", 100, 0, 2000); _h_excluded = bookHisto1D("excluded", 2, -0.5, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { // Muons Particles muon_candidates; const Particles charged_tracks = apply(event, "CFS").particles(); const Particles visible_particles = apply(event, "VFS").particles(); foreach (const Particle& mu, apply(event, "muons").particlesByPt()) { // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself) double pTinCone = -mu.pT(); foreach (const Particle& track, charged_tracks) { if (deltaR(mu.momentum(), track.momentum()) < 0.3) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles within dR<0.3) double eTinCone = 0.; foreach (const Particle& visible_particle, visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(), visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reco int muon_id = 13; if ( mu.hasAncestor(15) || mu.hasAncestor(-15)) muon_id = 14; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(muon_id, mu) : 1.0; const bool keep_muon = rand()/static_cast(RAND_MAX) <= eff; // Keep muon if pTCone30/pT < 0.15 and eTCone30/pT < 0.2 and reconstructed if (keep_muon && pTinCone/mu.pT() <= 0.15 && eTinCone/mu.pT() < 0.2) muon_candidates.push_back(mu); } // Electrons Particles electron_candidates; foreach (const Particle& e, apply(event, "elecs").particlesByPt()) { // Neglect electrons in crack regions if (inRange(e.abseta(), 1.37, 1.52)) continue; // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself) double pTinCone = -e.pT(); foreach (const Particle& track, charged_tracks) { if (deltaR(e.momentum(), track.momentum()) < 0.3) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles (except muons) within dR<0.3) double eTinCone = 0.; foreach (const Particle& visible_particle, visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(), visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reco int elec_id = 11; if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12; const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(elec_id, e) : 1.0; const bool keep_elec = rand()/static_cast(RAND_MAX) <= eff; // Keep electron if pTCone30/pT < 0.13 and eTCone30/pT < 0.2 and reconstructed if (keep_elec && pTinCone/e.pT() <= 0.13 && eTinCone/e.pT() < 0.2) electron_candidates.push_back(e); } // Taus /// @todo This could benefit from a tau finder projection Particles tau_candidates; - foreach (const Particle& tau, apply(event, "UFS").particlesByPt()) { + foreach (const Particle& tau, apply(event, "UFS").particlesByPt()) { // Only pick taus out of all unstable particles if (tau.abspid() != PID::TAU) continue; // Check that tau has decayed into daughter particles /// @todo Huh? Unstable taus with no decay vtx? Can use Particle.isStable()? But why in this situation? 
if (tau.genParticle()->end_vertex() == 0) continue; // Calculate visible tau pT from pT of tau neutrino in tau decay for pT and |eta| cuts FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_mom(tau); Particle tau_vis = tau; tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum); // keep only taus in certain eta region and above 15 GeV of visible tau pT if ( tau_vis.pT() <= 15.0*GeV || tau_vis.abseta() > 2.5) continue; // Get prong number (number of tracks) in tau decay and check if tau decays leptonically unsigned int nprong = 0; bool lep_decaying_tau = false; get_prong_number(tau.genParticle(), nprong, lep_decaying_tau); // Apply reconstruction efficiency int tau_id = 15; if (nprong == 1) tau_id = 15; else if (nprong == 3) tau_id = 16; // Get fiducial lepton efficiency simulate reco efficiency const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(tau_id, tau_vis) : 1.0; const bool keep_tau = rand()/static_cast(RAND_MAX) <= eff; // Keep tau if nprong = 1, it decays hadronically, and it's reconstructed by the detector if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis); } // Jets (all anti-kt R=0.4 jets with pT > 25 GeV and eta < 4.9) Jets jet_candidates; foreach (const Jet& jet, apply(event, "AntiKtJets04").jetsByPt(25*GeV)) { if (jet.abseta() < 4.9) jet_candidates.push_back(jet); } // ETmiss Particles vfs_particles = apply(event, "VFS").particles(); FourMomentum pTmiss; foreach (const Particle& p, vfs_particles) pTmiss -= p.momentum(); double eTmiss = pTmiss.pT()/GeV; //------------------ // Overlap removal // electron - electron Particles electron_candidates_2; for (size_t ie = 0; ie < electron_candidates.size(); ++ie) { const Particle & e = electron_candidates[ie]; bool away = true; // If electron pair within dR < 0.1: remove electron with lower pT for (size_t ie2=0; ie2 < electron_candidates_2.size(); ++ie2) { if ( deltaR( e.momentum(), electron_candidates_2[ie2].momentum()) < 0.1 ) { away 
= false; break; } } // If isolated keep it if ( away ) electron_candidates_2.push_back( e ); } // jet - electron Jets recon_jets; foreach (const Jet& jet, jet_candidates) { bool away = true; // if jet within dR < 0.2 of electron: remove jet foreach (const Particle& e, electron_candidates_2) { if (deltaR(e.momentum(), jet.momentum()) < 0.2) { away = false; break; } } // jet - tau if (away) { // If jet within dR < 0.2 of tau: remove jet foreach (const Particle& tau, tau_candidates) { if (deltaR(tau.momentum(), jet.momentum()) < 0.2) { away = false; break; } } } // If isolated keep it if ( away ) recon_jets.push_back( jet ); } // electron - jet Particles recon_leptons, recon_e; for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) { const Particle& e = electron_candidates_2[ie]; // If electron within 0.2 < dR < 0.4 from any jets: remove electron bool away = true; foreach (const Jet& jet, recon_jets) { if (deltaR(e.momentum(), jet.momentum()) < 0.4) { away = false; break; } } // electron - muon // if electron within dR < 0.1 of a muon: remove electron if (away) { foreach (const Particle& mu, muon_candidates) { if (deltaR(mu.momentum(), e.momentum()) < 0.1) { away = false; break; } } } // If isolated keep it if (away) { recon_e += e; recon_leptons += e; } } // tau - electron Particles recon_tau; foreach ( const Particle& tau, tau_candidates ) { bool away = true; // If tau within dR < 0.2 of an electron: remove tau foreach ( const Particle& e, recon_e ) { if (deltaR( tau.momentum(), e.momentum()) < 0.2) { away = false; break; } } // tau - muon // If tau within dR < 0.2 of a muon: remove tau if (away) { foreach (const Particle& mu, muon_candidates) { if (deltaR(tau.momentum(), mu.momentum()) < 0.2) { away = false; break; } } } // If isolated keep it if (away) recon_tau.push_back( tau ); } // Muon - jet isolation Particles recon_mu, trigger_mu; // If muon within dR < 0.4 of a jet, remove muon foreach (const Particle& mu, muon_candidates) { bool away = true; foreach 
(const Jet& jet, recon_jets) { if ( deltaR( mu.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } if (away) { recon_mu.push_back( mu ); recon_leptons.push_back( mu ); if (mu.abseta() < 2.4) trigger_mu.push_back( mu ); } } // End overlap removal //------------------ // Jet cleaning if (rand()/static_cast(RAND_MAX) <= 0.42) { foreach (const Jet& jet, recon_jets) { const double eta = jet.rapidity(); const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if (jet.pT() > 25*GeV && inRange(eta, -0.1, 1.5) && inRange(phi, -0.9, -0.5)) vetoEvent; } } // Post-isolation event cuts // Require at least 3 charged tracks in event if (charged_tracks.size() < 3) vetoEvent; // And at least one e/mu passing trigger if (!( !recon_e .empty() && recon_e[0] .pT() > 25*GeV) && !( !trigger_mu.empty() && trigger_mu[0].pT() > 25*GeV) ) { MSG_DEBUG("Hardest lepton fails trigger"); vetoEvent; } // And only accept events with at least 2 electrons and muons and at least 3 leptons in total if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent; // Now it's worth getting the event weight const double weight = event.weight(); // Sort leptons by decreasing pT sortByPt(recon_leptons); sortByPt(recon_tau); // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons double HTlep = 0.; Particles chosen_leptons; if ( recon_leptons.size() > 2 ) { _h_pt_1_3l->fill(recon_leptons[0].perp()/GeV, weight); _h_pt_2_3l->fill(recon_leptons[1].perp()/GeV, weight); _h_pt_3_3l->fill(recon_leptons[2].perp()/GeV, weight); HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_leptons[2] ); } else { _h_pt_1_2ltau->fill(recon_leptons[0].perp()/GeV, weight); _h_pt_2_2ltau->fill(recon_leptons[1].perp()/GeV, weight); _h_pt_3_2ltau->fill(recon_tau[0].perp()/GeV, weight); HTlep = 
(recon_leptons[0].pT() + recon_leptons[1].pT() + recon_tau[0].pT())/GeV ; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_tau[0] ); } // Number of prompt e/mu and had taus _h_e_n ->fill(recon_e.size() , weight); _h_mu_n ->fill(recon_mu.size() , weight); _h_tau_n->fill(recon_tau.size(), weight); // Calculate HTjets double HTjets = 0.; foreach ( const Jet & jet, recon_jets ) HTjets += jet.perp()/GeV; // Calculate meff double meff = eTmiss + HTjets; Particles all_leptons; foreach ( const Particle & e , recon_e ) { meff += e.perp()/GeV; all_leptons.push_back( e ); } foreach ( const Particle & mu, recon_mu ) { meff += mu.perp()/GeV; all_leptons.push_back( mu ); } foreach ( const Particle & tau, recon_tau ) { meff += tau.perp()/GeV; all_leptons.push_back( tau ); } // Fill histogram of kinematic variables _h_HTlep_all ->fill(HTlep , weight); _h_HTjets_all->fill(HTjets, weight); _h_MET_all ->fill(eTmiss, weight); _h_Meff_all ->fill(meff , weight); // Determine signal region (3l/2ltau, onZ/offZ) string basic_signal_region; if ( recon_mu.size() + recon_e.size() > 2 ) basic_signal_region += "3l_"; else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0)) basic_signal_region += "2ltau_"; // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass int onZ = isonZ(chosen_leptons); if (onZ == 1) basic_signal_region += "onZ"; else if (onZ == 0) basic_signal_region += "offZ"; // Check in which signal regions this event falls and adjust event counters fillEventCountsPerSR(basic_signal_region, onZ, HTlep, eTmiss, HTjets, meff, weight); } /// Normalise histograms etc., after the run void finalize() { // Normalize to an integrated luminosity of 1 fb-1 double norm = crossSection()/femtobarn/sumOfWeights(); string best_signal_region = ""; double ratio_best_SR = 0.; // Loop over all signal regions and find signal region with best sensitivity (ratio signal 
events/visible cross-section) for (size_t i = 0; i < _signal_regions.size(); i++) { double signal_events = _eventCountsPerSR[_signal_regions[i]] * norm; // Use expected upper limits to find best signal region double UL95 = getUpperLimit(_signal_regions[i], false); double ratio = signal_events / UL95; if (ratio > ratio_best_SR) { best_signal_region = _signal_regions[i]; ratio_best_SR = ratio; } } double signal_events_best_SR = _eventCountsPerSR[best_signal_region] * norm; double exp_UL_best_SR = getUpperLimit(best_signal_region, false); double obs_UL_best_SR = getUpperLimit(best_signal_region, true); // Print out result cout << "----------------------------------------------------------------------------------------" << endl; cout << "Best signal region: " << best_signal_region << endl; cout << "Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << endl; cout << "Efficiency*Acceptance: " << _eventCountsPerSR[best_signal_region]/sumOfWeights() << endl; cout << "Cross-section [fb]: " << crossSection()/femtobarn << endl; cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << endl; cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << endl; cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << endl; cout << "Ratio (signal events / observed visible cross-section): " << signal_events_best_SR/obs_UL_best_SR << endl; cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > exp_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." 
<< endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > obs_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; // Normalize to cross section if (norm != 0) { scale(_h_HTlep_all, norm); scale(_h_HTjets_all, norm); scale(_h_MET_all, norm); scale(_h_Meff_all, norm); scale(_h_pt_1_3l, norm); scale(_h_pt_2_3l, norm); scale(_h_pt_3_3l, norm); scale(_h_pt_1_2ltau, norm); scale(_h_pt_2_2ltau, norm); scale(_h_pt_3_2ltau, norm); scale(_h_e_n, norm); scale(_h_mu_n, norm); scale(_h_tau_n, norm); scale(_h_excluded, signal_events_best_SR); } } /// Helper functions //@{ /// Function giving a list of all signal regions vector getSignalRegions() { // List of basic signal regions vector basic_signal_regions; basic_signal_regions.push_back("3l_offZ"); basic_signal_regions.push_back("3l_onZ"); basic_signal_regions.push_back("2ltau_offZ"); basic_signal_regions.push_back("2ltau_onZ"); // List of kinematic variables vector kinematic_variables; kinematic_variables.push_back("HTlep"); kinematic_variables.push_back("METStrong"); kinematic_variables.push_back("METWeak"); kinematic_variables.push_back("Meff"); kinematic_variables.push_back("MeffStrong"); vector signal_regions; // Loop over all kinematic variables and basic signal regions for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++) { for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++) { // Is signal region onZ? 
int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ? 1 : 0; // Get cut values for this kinematic variable vector cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ); // Loop over all cut values for (size_t i2 = 0; i2 < cut_values.size(); i2++) { // push signal region into vector signal_regions.push_back( (kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(i2)) ); } } } return signal_regions; } /// Function giving all cut vales per kinematic variable (taking onZ for MET into account) vector getCutsPerSignalRegion(const string& signal_region, int onZ=0) { vector cutValues; // Cut values for HTlep if (signal_region.compare("HTlep") == 0) { cutValues.push_back(0); cutValues.push_back(100); cutValues.push_back(150); cutValues.push_back(200); cutValues.push_back(300); } // Cut values for METStrong (HTjets > 100 GeV) and METWeak (HTjets < 100 GeV) else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0) { if (onZ == 0) cutValues.push_back(0); else if (onZ == 1) cutValues.push_back(20); cutValues.push_back(50); cutValues.push_back(75); } // Cut values for Meff and MeffStrong (MET > 75 GeV) if (signal_region.compare("Meff") == 0 || signal_region.compare("MeffStrong") == 0) { cutValues.push_back(0); cutValues.push_back(150); cutValues.push_back(300); cutValues.push_back(500); } return cutValues; } /// function fills map EventCountsPerSR by looping over all signal regions /// and looking if the event falls into this signal region void fillEventCountsPerSR(const string& basic_signal_region, int onZ, double HTlep, double eTmiss, double HTjets, double meff, double weight) { // Get cut values for HTlep, loop over them and add event if cut is passed vector cut_values = getCutsPerSignalRegion("HTlep", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (HTlep > cut_values[i]) _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut 
values for METStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets > 100.) _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for METWeak, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METWeak", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets <= 100.) _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for Meff, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("Meff", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i]) _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for MeffStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && eTmiss > 75.) 
_eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } } /// Function returning 4-vector of daughter-particle if it is a tau neutrino /// @todo Move to TauFinder and make less HepMC-ish FourMomentum get_tau_neutrino_mom(const Particle& p) { assert(p.abspid() == PID::TAU); const GenVertex* dv = p.genParticle()->end_vertex(); assert(dv != NULL); for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { if (abs((*pp)->pdg_id()) == PID::NU_TAU) return FourMomentum((*pp)->momentum()); } return FourMomentum(); } /// Function calculating the prong number of taus /// @todo Move to TauFinder and make less HepMC-ish void get_prong_number(const GenParticle* p, unsigned int& nprong, bool& lep_decaying_tau) { assert(p != NULL); //const int tau_barcode = p->barcode(); const GenVertex* dv = p->end_vertex(); assert(dv != NULL); for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { // If they have status 1 and are charged they will produce a track and the prong number is +1 if ((*pp)->status() == 1 ) { const int id = (*pp)->pdg_id(); if (Rivet::PID::charge(id) != 0 ) ++nprong; // Check if tau decays leptonically // @todo Can a tau decay include a tau in its decay daughters?! if ((abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true; } // If the status of the daughter particle is 2 it is unstable and the further decays are checked else if ((*pp)->status() == 2 ) { get_prong_number(*pp, nprong, lep_decaying_tau); } } } /// Function giving fiducial lepton efficiency double apply_reco_eff(int flavor, const Particle& p) { float pt = p.pT()/GeV; float eta = p.eta(); double eff = 0.; //double err = 0.; if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff. 
//float rho = 0.820; float p0 = 7.34; float p1 = 0.8977; //float ep0= 0.5 ; float ep1= 0.0087; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.6867; float wz_ele_eta[] = {0.588717,0.603674,0.666135,0.747493,0.762202,0.675051,0.751606,0.745569,0.665333,0.610432,0.592693,}; //float ewz_ele_eta[] ={0.00292902,0.002476,0.00241209,0.00182319,0.00194339,0.00299785,0.00197339,0.00182004,0.00241793,0.00245997,0.00290394,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_ele_eta[ibin]; //double err_eta = ewz_ele_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 12) { // weight electron from tau //float rho = 0.884; float p0 = 6.799; float p1 = 0.842; //float ep0= 0.664; float ep1= 0.016; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.5319; float wz_elet_eta[] = {0.468945,0.465953,0.489545,0.58709,0.59669,0.515829,0.59284,0.575828,0.498181,0.463536,0.481738,}; //float ewz_elet_eta[] ={0.00933795,0.00780868,0.00792679,0.00642083,0.00692652,0.0101568,0.00698452,0.00643524,0.0080002,0.00776238,0.0094699,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 
1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_elet_eta[ibin]; //double err_eta = ewz_elet_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 13) {// weight prompt muon //if eta>0.1 float p0 = -18.21; float p1 = 14.83; float p2 = 0.9312; //float ep0= 5.06; float ep1= 1.9; float ep2=0.00069; if ( fabs(eta) < 0.1) { p0 = 7.459; p1 = 2.615; p2 = 0.5138; //ep0 = 10.4; ep1 = 4.934; ep2 = 0.0034; } double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } if (flavor == 14) {// weight muon from tau if (fabs(eta) < 0.1) { float p0 = -1.756; float p1 = 12.38; float p2 = 0.4441; //float ep0= 10.39; float ep1= 7.9; float ep2=0.022; double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } else { float p0 = 2.102; float p1 = 0.8293; //float ep0= 0.271; float ep1= 0.0083; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); } } if (flavor == 15) {// weight hadronic tau 1p float wz_tau1p[] = {0.0249278,0.146978,0.225049,0.229212,0.21519,0.206152,0.201559,0.197917,0.209249,0.228336,0.193548,}; //float ewz_tau1p[] ={0.00178577,0.00425252,0.00535052,0.00592126,0.00484684,0.00612941,0.00792099,0.0083006,0.0138307,0.015568,0.0501751,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 25) ibin = 3; if (pt > 30) ibin = 4; if (pt > 40) ibin = 5; if (pt > 50) ibin = 6; if (pt > 60) ibin = 7; if (pt > 80) ibin = 8; if (pt > 100) ibin = 9; if (pt > 200) ibin = 10; eff = wz_tau1p[ibin]; //err = ewz_tau1p[ibin]; double avgrate = 0.1718; float wz_tau1p_eta[] = {0.162132,0.176393,0.139619,0.178813,0.185144,0.210027,0.203937,0.178688,0.137034,0.164216,0.163713,}; //float ewz_tau1p_eta[] ={0.00706705,0.00617989,0.00506798,0.00525172,0.00581865,0.00865675,0.00599245,0.00529877,0.00506368,0.00617025,0.00726219,}; ibin = 3; 
if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_tau1p_eta[ibin]; //double err_eta = ewz_tau1p_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 16) { //weight hadronic tau 3p float wz_tau3p[] = {0.000587199,0.00247181,0.0013031,0.00280112,}; //float ewz_tau3p[] ={0.000415091,0.000617187,0.000582385,0.00197792,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 40) ibin = 3; if (pt > 80) ibin = 4; eff = wz_tau3p[ibin]; //err = ewz_tau3p[ibin]; } return eff; } /// Function giving observed upper limit (visible cross-section) double getUpperLimit(const string& signal_region, bool observed) { map upperLimitsObserved; upperLimitsObserved["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsObserved["HTlep_3l_offZ_cut_100"] = 8.7; upperLimitsObserved["HTlep_3l_offZ_cut_150"] = 4.0; upperLimitsObserved["HTlep_3l_offZ_cut_200"] = 4.4; upperLimitsObserved["HTlep_3l_offZ_cut_300"] = 1.6; upperLimitsObserved["HTlep_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsObserved["HTlep_2ltau_offZ_cut_150"] = 6.1; upperLimitsObserved["HTlep_2ltau_offZ_cut_200"] = 3.3; upperLimitsObserved["HTlep_2ltau_offZ_cut_300"] = 1.2; upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 48.; upperLimitsObserved["HTlep_3l_onZ_cut_100"] = 38.; upperLimitsObserved["HTlep_3l_onZ_cut_150"] = 14.; upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 7.2; upperLimitsObserved["HTlep_3l_onZ_cut_300"] = 4.5; upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["HTlep_2ltau_onZ_cut_100"] = 53.; upperLimitsObserved["HTlep_2ltau_onZ_cut_150"] = 
11.0; upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 5.2; upperLimitsObserved["HTlep_2ltau_onZ_cut_300"] = 3.0; upperLimitsObserved["METStrong_3l_offZ_cut_0"] = 2.6; upperLimitsObserved["METStrong_3l_offZ_cut_50"] = 2.1; upperLimitsObserved["METStrong_3l_offZ_cut_75"] = 2.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_0"] = 4.2; upperLimitsObserved["METStrong_2ltau_offZ_cut_50"] = 3.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_75"] = 2.6; upperLimitsObserved["METStrong_3l_onZ_cut_20"] = 11.0; upperLimitsObserved["METStrong_3l_onZ_cut_50"] = 6.4; upperLimitsObserved["METStrong_3l_onZ_cut_75"] = 5.1; upperLimitsObserved["METStrong_2ltau_onZ_cut_20"] = 5.9; upperLimitsObserved["METStrong_2ltau_onZ_cut_50"] = 3.4; upperLimitsObserved["METStrong_2ltau_onZ_cut_75"] = 1.2; upperLimitsObserved["METWeak_3l_offZ_cut_0"] = 11.; upperLimitsObserved["METWeak_3l_offZ_cut_50"] = 5.3; upperLimitsObserved["METWeak_3l_offZ_cut_75"] = 3.1; upperLimitsObserved["METWeak_2ltau_offZ_cut_0"] = 23.; upperLimitsObserved["METWeak_2ltau_offZ_cut_50"] = 4.3; upperLimitsObserved["METWeak_2ltau_offZ_cut_75"] = 3.1; upperLimitsObserved["METWeak_3l_onZ_cut_20"] = 41.; upperLimitsObserved["METWeak_3l_onZ_cut_50"] = 16.; upperLimitsObserved["METWeak_3l_onZ_cut_75"] = 8.0; upperLimitsObserved["METWeak_2ltau_onZ_cut_20"] = 80.; upperLimitsObserved["METWeak_2ltau_onZ_cut_50"] = 4.4; upperLimitsObserved["METWeak_2ltau_onZ_cut_75"] = 1.8; upperLimitsObserved["Meff_3l_offZ_cut_0"] = 11.; upperLimitsObserved["Meff_3l_offZ_cut_150"] = 8.1; upperLimitsObserved["Meff_3l_offZ_cut_300"] = 3.1; upperLimitsObserved["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["Meff_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["Meff_2ltau_offZ_cut_150"] = 12.; upperLimitsObserved["Meff_2ltau_offZ_cut_300"] = 3.9; upperLimitsObserved["Meff_2ltau_offZ_cut_500"] = 2.2; upperLimitsObserved["Meff_3l_onZ_cut_0"] = 48.; upperLimitsObserved["Meff_3l_onZ_cut_150"] = 37.; upperLimitsObserved["Meff_3l_onZ_cut_300"] = 11.; 
upperLimitsObserved["Meff_3l_onZ_cut_500"] = 4.8; upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["Meff_2ltau_onZ_cut_150"] = 28.; upperLimitsObserved["Meff_2ltau_onZ_cut_300"] = 5.9; upperLimitsObserved["Meff_2ltau_onZ_cut_500"] = 1.9; upperLimitsObserved["MeffStrong_3l_offZ_cut_0"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_150"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_300"] = 2.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_0"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_150"] = 4.0; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_300"] = 2.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_500"] = 1.5; upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_150"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_300"] = 6.8; upperLimitsObserved["MeffStrong_3l_onZ_cut_500"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 1.6; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_150"] = 1.4; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_300"] = 1.5; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_500"] = 0.9; // Expected upper limits are also given but not used in this analysis map upperLimitsExpected; upperLimitsExpected["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsExpected["HTlep_3l_offZ_cut_100"] = 8.5; upperLimitsExpected["HTlep_3l_offZ_cut_150"] = 4.6; upperLimitsExpected["HTlep_3l_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_3l_offZ_cut_300"] = 1.9; upperLimitsExpected["HTlep_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsExpected["HTlep_2ltau_offZ_cut_150"] = 6.4; upperLimitsExpected["HTlep_2ltau_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_2ltau_offZ_cut_300"] = 1.5; upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 33.; upperLimitsExpected["HTlep_3l_onZ_cut_100"] = 25.; upperLimitsExpected["HTlep_3l_onZ_cut_150"] = 12.; 
upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 6.5; upperLimitsExpected["HTlep_3l_onZ_cut_300"] = 3.1; upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["HTlep_2ltau_onZ_cut_100"] = 61.; upperLimitsExpected["HTlep_2ltau_onZ_cut_150"] = 9.9; upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 4.5; upperLimitsExpected["HTlep_2ltau_onZ_cut_300"] = 1.9; upperLimitsExpected["METStrong_3l_offZ_cut_0"] = 3.1; upperLimitsExpected["METStrong_3l_offZ_cut_50"] = 2.4; upperLimitsExpected["METStrong_3l_offZ_cut_75"] = 2.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_0"] = 4.8; upperLimitsExpected["METStrong_2ltau_offZ_cut_50"] = 3.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_75"] = 2.1; upperLimitsExpected["METStrong_3l_onZ_cut_20"] = 8.7; upperLimitsExpected["METStrong_3l_onZ_cut_50"] = 4.9; upperLimitsExpected["METStrong_3l_onZ_cut_75"] = 3.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_20"] = 7.3; upperLimitsExpected["METStrong_2ltau_onZ_cut_50"] = 2.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_75"] = 1.5; upperLimitsExpected["METWeak_3l_offZ_cut_0"] = 10.; upperLimitsExpected["METWeak_3l_offZ_cut_50"] = 4.7; upperLimitsExpected["METWeak_3l_offZ_cut_75"] = 3.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_0"] = 21.; upperLimitsExpected["METWeak_2ltau_offZ_cut_50"] = 4.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_75"] = 2.6; upperLimitsExpected["METWeak_3l_onZ_cut_20"] = 30.; upperLimitsExpected["METWeak_3l_onZ_cut_50"] = 10.; upperLimitsExpected["METWeak_3l_onZ_cut_75"] = 5.4; upperLimitsExpected["METWeak_2ltau_onZ_cut_20"] = 88.; upperLimitsExpected["METWeak_2ltau_onZ_cut_50"] = 5.5; upperLimitsExpected["METWeak_2ltau_onZ_cut_75"] = 2.2; upperLimitsExpected["Meff_3l_offZ_cut_0"] = 11.; upperLimitsExpected["Meff_3l_offZ_cut_150"] = 8.8; upperLimitsExpected["Meff_3l_offZ_cut_300"] = 3.7; upperLimitsExpected["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsExpected["Meff_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["Meff_2ltau_offZ_cut_150"] = 13.; 
upperLimitsExpected["Meff_2ltau_offZ_cut_300"] = 4.9; upperLimitsExpected["Meff_2ltau_offZ_cut_500"] = 2.4; upperLimitsExpected["Meff_3l_onZ_cut_0"] = 33.; upperLimitsExpected["Meff_3l_onZ_cut_150"] = 25.; upperLimitsExpected["Meff_3l_onZ_cut_300"] = 9.; upperLimitsExpected["Meff_3l_onZ_cut_500"] = 3.9; upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["Meff_2ltau_onZ_cut_150"] = 35.; upperLimitsExpected["Meff_2ltau_onZ_cut_300"] = 6.8; upperLimitsExpected["Meff_2ltau_onZ_cut_500"] = 2.5; upperLimitsExpected["MeffStrong_3l_offZ_cut_0"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_300"] = 3.0; upperLimitsExpected["MeffStrong_3l_offZ_cut_500"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_0"] = 3.8; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_300"] = 3.1; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_500"] = 1.6; upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 6.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_150"] = 7.1; upperLimitsExpected["MeffStrong_3l_onZ_cut_300"] = 4.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_500"] = 3.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 2.4; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_150"] = 2.5; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_300"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_500"] = 1.1; if (observed) return upperLimitsObserved[signal_region]; else return upperLimitsExpected[signal_region]; } /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass /// @todo Should the reference Z mass be 91.2? 
int isonZ (const Particles& particles) { int onZ = 0; double best_mass_2 = 999.; double best_mass_3 = 999.; // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass foreach ( const Particle& p1, particles ) { foreach ( const Particle& p2, particles ) { double mass_difference_2_old = fabs(91.0 - best_mass_2); double mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV); // If particle combination is OSSF pair calculate mass difference to Z mass if ( (p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169) ) { // Get invariant mass closest to Z mass if (mass_difference_2_new < mass_difference_2_old) best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV; // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion) foreach ( const Particle & p3 , particles ) { double mass_difference_3_old = fabs(91.0 - best_mass_3); double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV); if (mass_difference_3_new < mass_difference_3_old) best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV; } } } } // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination // If this mass is in a 20 GeV window around the Z mass, the event is classified as onZ double best_mass = min(best_mass_2, best_mass_3); if (fabs(91.0 - best_mass) < 20) onZ = 1; return onZ; } //@} private: /// Histograms //@{ Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all; Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau; Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n; Histo1DPtr _h_excluded; //@} /// Fiducial efficiencies to model the effects of the ATLAS detector bool _use_fiducial_lepton_efficiency; /// List of signal regions and event counts per signal region vector _signal_regions; map _eventCountsPerSR; }; 
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1204447); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1282441.cc b/analyses/pluginATLAS/ATLAS_2014_I1282441.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1282441.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1282441.cc @@ -1,91 +1,91 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" namespace Rivet { class ATLAS_2014_I1282441 : public Analysis { public: ATLAS_2014_I1282441() : Analysis("ATLAS_2014_I1282441") { } void init() { // Use a large eta range such that we can discriminate on y /// @todo Convert to use a y-cut directly - UnstableFinalState ufs(Cuts::abseta < 10 && Cuts::pT > 500*MeV); + UnstableParticles ufs(Cuts::abseta < 10 && Cuts::pT > 500*MeV); IdentifiedFinalState phis(ufs); phis.acceptIdPair(PID::PHI); declare(phis, "Phis"); IdentifiedFinalState kpms(Cuts::abseta < 2.0 && Cuts::pT > 230*MeV); kpms.acceptIdPair(PID::KPLUS); declare(kpms, "Kpms"); _h_phi_rapidity = bookHisto1D(2,1,1); _h_phi_pT = bookHisto1D(1,1,1); } void analyze(const Event& event) { const Particles& ks_all = apply(event, "Kpms").particles(); Particles kp, km; foreach (const Particle& p, ks_all) { if (!p.hasAncestor(PID::PHI)) { MSG_DEBUG("-- K not from phi."); continue; } if (p.p3().mod() > 800*MeV) { MSG_DEBUG("-- p K too high."); continue; } (p.charge() > 0 ? 
kp : km).push_back(p); } const Particles& phis_all = apply(event, "Phis").particles(); Particles phis; /// @todo Use particles(Cuts&) instead foreach (const Particle& p, phis_all) { if ( p.absrap() > 0.8 ) { MSG_DEBUG("-- phi Y too high."); continue; } if ( p.pT() > 1.2*GeV ) { MSG_DEBUG("-- phi pT too high."); continue; } phis.push_back(p); } // Find Phi -> KK decays through matching of the kinematics if (!kp.empty() && !km.empty() && !phis.empty()) { const double w = event.weight(); MSG_DEBUG("Numbers of particles: #phi=" << phis.size() << ", #K+=" << kp.size() << ", #K-=" << km.size()); for (size_t ip = 0; ip < phis.size(); ++ip) { const Particle& phi = phis[ip]; for (size_t ikm = 0; ikm < km.size(); ++ikm) { for (size_t ikp = 0; ikp < kp.size(); ++ikp) { const FourMomentum mom = kp[ikp].mom() + km[ikm].mom(); if ( fuzzyEquals(mom.mass(), phi.mass(), 1e-5) ) { MSG_DEBUG("Accepted combinatoric: phi#:" << ip << " K+#:" << ikp << " K-#:" << ikm); _h_phi_rapidity->fill(phi.absrap(), w); _h_phi_pT->fill(phi.pT()/MeV, w); } else { MSG_DEBUG("Rejected combinatoric: phi#:" << ip << " K+#:" << ikp << " K-#:" << ikm << " Mass difference is " << mom.mass()-phi.mass()); } } } } } } void finalize() { scale(_h_phi_rapidity, crossSection()/microbarn/sumOfWeights()); scale(_h_phi_pT, crossSection()/microbarn/sumOfWeights()); } private: Histo1DPtr _h_phi_rapidity, _h_phi_pT; }; DECLARE_RIVET_PLUGIN(ATLAS_2014_I1282441); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1282447.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1282447.cc @@ -1,597 +1,597 @@ // -*- C++ -*- // ATLAS W+c analysis ////////////////////////////////////////////////////////////////////////// /* Description of rivet analysis ATLAS_2014_I1282447 W+c production This rivet routine implements the ATLAS W+c analysis. 
Apart from those histograms, described and published on HEP Data, here are some helper histograms defined, these are: d02-x01-y01, d02-x01-y02 and d08-x01-y01 are ratios, the nominator ("_plus") and denominator ("_minus") histograms are also given, so that the ratios can be reconstructed if need be (e.g. when running on separate samples). d05 and d06 are ratios over inclusive W production. The routine has to be run on a sample for inclusive W production in order to make sure the denominator ("_winc") is correctly filled. The ratios can be constructed using the following sample code: python divideWCharm.py import yoda hists_wc = yoda.read("Rivet_Wc.yoda") hists_winc = yoda.read("Rivet_Winc.yoda") ## division histograms --> ONLY for different plus minus runs # (merge before using yodamerge Rivet_plus.yoda Rivet_minus.yoda > Rivet_Wc.yoda) d02y01_plus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_plus"] d02y01_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y01_minus"] ratio_d02y01 = d02y01_plus.divide(d02y01_minus) ratio_d02y01.path = "/ATLAS_2014_I1282447/d02-x01-y01" d02y02_plus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_plus"] d02y02_minus = hists_wc["/ATLAS_2014_I1282447/d02-x01-y02_minus"] ratio_d02y02= d02y02_plus.divide(d02y02_minus) ratio_d02y02.path = "/ATLAS_2014_I1282447/d02-x01-y02" d08y01_plus = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_plus"] d08y01_minus = hists_wc["/ATLAS_2014_I1282447/d08-x01-y01_minus"] ratio_d08y01= d08y01_plus.divide(d08y01_minus) ratio_d08y01.path = "/ATLAS_2014_I1282447/d08-x01-y01" # inclusive cross section h_winc = hists_winc["/ATLAS_2014_I1282447/d05-x01-y01"] h_d = hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"] h_dstar= hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"] ratio_wd = h_d.divide(h_winc) ratio_wd.path = "/ATLAS_2014_I1282447/d05-x01-y02" ratio_wdstar = h_d.divide(h_winc) ratio_wdstar.path = "/ATLAS_2014_I1282447/d05-x01-y03" # pT differential h_winc_plus = hists_winc["/ATLAS_2014_I1282447/d06-x01-y01_winc"] h_winc_minus 
= hists_winc["/ATLAS_2014_I1282447/d06-x01-y02_winc"] h_wd_plus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y01_wplus"] h_wd_minus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y02_wminus"] h_wdstar_plus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y03_wplus"] h_wdstar_minus = hists_wc["/ATLAS_2014_I1282447/d06-x01-y04_wminus"] ratio_wd_plus = h_wd_plus.divide(h_winc_plus) ratio_wd_plus.path = "/ATLAS_2014_I1282447/d06-x01-y01" ratio_wd_minus = h_wd_plus.divide(h_winc_minus) ratio_wd_minus.path = "/ATLAS_2014_I1282447/d06-x01-y02" ratio_wdstar_plus = h_wdstar_plus.divide(h_winc_plus) ratio_wdstar_plus.path = "/ATLAS_2014_I1282447/d06-x01-y03" ratio_wdstar_minus = h_wdstar_plus.divide(h_winc_minus) ratio_wdstar_minus.path = "/ATLAS_2014_I1282447/d06-x01-y04" ratio_wd_plus = h_wd_plus.divide(h_winc_plus) ratio_wd_plus.path = "/ATLAS_2014_I1282447/d06-x01-y01" ratio_wd_minus = h_wd_plus.divide(h_winc_minus) ratio_wd_minus.path = "/ATLAS_2014_I1282447/d06-x01-y02" h_winc_plus= hists_winc["/ATLAS_2014_I1282447/d06-x01-y01_winc"] h_winc_minus= hists_winc["/ATLAS_2014_I1282447/d06-x01-y02_winc"] ## copy other histograms for plotting d01x01y01= hists_wc["/ATLAS_2014_I1282447/d01-x01-y01"] d01x01y01.path = "/ATLAS_2014_I1282447/d01-x01-y01" d01x01y02= hists_wc["/ATLAS_2014_I1282447/d01-x01-y02"] d01x01y02.path = "/ATLAS_2014_I1282447/d01-x01-y02" d01x01y03= hists_wc["/ATLAS_2014_I1282447/d01-x01-y03"] d01x01y03.path = "/ATLAS_2014_I1282447/d01-x01-y03" d03x01y01= hists_wc["/ATLAS_2014_I1282447/d03-x01-y01"] d03x01y01.path = "/ATLAS_2014_I1282447/d03-x01-y01" d03x01y02= hists_wc["/ATLAS_2014_I1282447/d03-x01-y02"] d03x01y02.path = "/ATLAS_2014_I1282447/d03-x01-y02" d04x01y01= hists_wc["/ATLAS_2014_I1282447/d04-x01-y01"] d04x01y01.path = "/ATLAS_2014_I1282447/d04-x01-y01" d04x01y02= hists_wc["/ATLAS_2014_I1282447/d04-x01-y02"] d04x01y02.path = "/ATLAS_2014_I1282447/d04-x01-y02" d04x01y03= hists_wc["/ATLAS_2014_I1282447/d04-x01-y03"] d04x01y03.path = "/ATLAS_2014_I1282447/d04-x01-y03" 
d04x01y04= hists_wc["/ATLAS_2014_I1282447/d04-x01-y04"] d04x01y04.path = "/ATLAS_2014_I1282447/d04-x01-y04" d07x01y01= hists_wc["/ATLAS_2014_I1282447/d07-x01-y01"] d07x01y01.path = "/ATLAS_2014_I1282447/d07-x01-y01" yoda.write([ratio_d02y01,ratio_d02y02,ratio_d08y01, ratio_wd ,ratio_wdstar,ratio_wd_plus,ratio_wd_minus ,ratio_wdstar_plus,ratio_wdstar_minus,d01x01y01,d01x01y02,d01x01y03,d03x01y01,d03x01y02,d04x01y01,d04x01y02,d04x01y03,d04x01y04,d07x01y01],"validation.yoda") */ ////////////////////////////////////////////////////////////////////////// #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class ATLAS_2014_I1282447 : public Analysis { public: /// Constructor ATLAS_2014_I1282447() : Analysis("ATLAS_2014_I1282447") { setNeedsCrossSection(true); } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// @todo Initialise and register projections here - UnstableFinalState fs; + UnstableParticles fs; Cut cuts = Cuts::etaIn(-2.5, 2.5) & (Cuts::pT > 20*GeV); /// should use sample WITHOUT QED radiation off the electron WFinder wfinder_born_el(fs, cuts, PID::ELECTRON, 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::CLUSTERALL, WFinder::TRACK); declare(wfinder_born_el, "WFinder_born_el"); WFinder wfinder_born_mu(fs, cuts, PID::MUON , 25*GeV, 8000*GeV, 15*GeV, 0.1, WFinder::CLUSTERALL, WFinder::TRACK); declare(wfinder_born_mu, "WFinder_born_mu"); // all hadrons that could be coming from a charm decay -- // -- for safety, use region -3.5 - 3.5 - declare(UnstableFinalState(Cuts::abseta <3.5), "hadrons"); + declare(UnstableParticles(Cuts::abseta <3.5), "hadrons"); // Input for the jets: no neutrinos, no muons, and no electron which passed the 
electron cuts // also: NO electron, muon or tau (needed due to ATLAS jet truth reconstruction feature) VetoedFinalState veto; veto.addVetoOnThisFinalState(wfinder_born_el); veto.addVetoOnThisFinalState(wfinder_born_mu); veto.addVetoPairId(PID::ELECTRON); veto.addVetoPairId(PID::MUON); veto.addVetoPairId(PID::TAU); FastJets jets(veto, FastJets::ANTIKT, 0.4); declare(jets, "jets"); // Book histograms // charge separated integrated cross sections _hist_wcjet_charge = bookHisto1D("d01-x01-y01"); _hist_wd_charge = bookHisto1D("d01-x01-y02"); _hist_wdstar_charge = bookHisto1D("d01-x01-y03"); // charge integrated total cross sections _hist_wcjet_ratio = bookScatter2D("d02-x01-y01"); _hist_wd_ratio = bookScatter2D("d02-x01-y02"); _hist_wcjet_minus = bookHisto1D("d02-x01-y01_minus"); _hist_wd_minus = bookHisto1D("d02-x01-y02_minus"); _hist_wcjet_plus = bookHisto1D("d02-x01-y01_plus"); _hist_wd_plus = bookHisto1D("d02-x01-y02_plus"); // eta distributions _hist_wplus_wcjet_eta_lep = bookHisto1D("d03-x01-y01"); _hist_wminus_wcjet_eta_lep = bookHisto1D("d03-x01-y02"); _hist_wplus_wdminus_eta_lep = bookHisto1D("d04-x01-y01"); _hist_wminus_wdplus_eta_lep = bookHisto1D("d04-x01-y02"); _hist_wplus_wdstar_eta_lep = bookHisto1D("d04-x01-y03"); _hist_wminus_wdstar_eta_lep = bookHisto1D("d04-x01-y04"); // ratio of cross section (WD over W inclusive) // postprocess! _hist_w_inc = bookHisto1D("d05-x01-y01"); _hist_wd_winc_ratio = bookScatter2D("d05-x01-y02"); _hist_wdstar_winc_ratio = bookScatter2D("d05-x01-y03"); // ratio of cross section (WD over W inclusive -- function of pT of D meson) _hist_wplusd_wplusinc_pt_ratio = bookScatter2D("d06-x01-y01"); _hist_wminusd_wminusinc_pt_ratio = bookScatter2D("d06-x01-y02"); _hist_wplusdstar_wplusinc_pt_ratio = bookScatter2D("d06-x01-y03"); _hist_wminusdstar_wminusinc_pt_ratio = bookScatter2D("d06-x01-y04"); // could use for postprocessing! 
_hist_wplusd_wplusinc_pt = bookHisto1D("d06-x01-y01_wplus"); _hist_wminusd_wminusinc_pt = bookHisto1D("d06-x01-y02_wminus"); _hist_wplusdstar_wplusinc_pt = bookHisto1D("d06-x01-y03_wplus"); _hist_wminusdstar_wminusinc_pt = bookHisto1D("d06-x01-y04_wminus"); _hist_wplus_winc = bookHisto1D("d06-x01-y01_winc"); _hist_wminus_winc = bookHisto1D("d06-x01-y02_winc"); // jet multiplicity of charge integrated W+cjet cross section (+0 or +1 jet in addition to the charm jet) _hist_wcjet_jets = bookHisto1D("d07-x01-y01"); // jet multiplicity of W+cjet cross section ratio (+0 or +1 jet in addition to the charm jet) _hist_wcjet_jets_ratio = bookScatter2D("d08-x01-y01"); _hist_wcjet_jets_plus = bookHisto1D("d08-x01-y01_plus"); _hist_wcjet_jets_minus = bookHisto1D("d08-x01-y01_minus"); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); double charge_weight = 0; // account for OS/SS events int lepton_charge = 0; double lepton_eta = 0.; /// Find leptons const WFinder& wfinder_born_el = apply(event, "WFinder_born_el"); const WFinder& wfinder_born_mu = apply(event, "WFinder_born_mu"); if (wfinder_born_el.empty() && wfinder_born_mu.empty()) { MSG_DEBUG("No W bosons found"); vetoEvent; } bool keepevent = false; //check electrons if (!wfinder_born_el.empty()) { const FourMomentum nu = wfinder_born_el.constituentNeutrinos()[0]; if (wfinder_born_el.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_el.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_el.constituentLeptons()[0].pseudorapidity()); } } //check muons if (!wfinder_born_mu.empty()) { const FourMomentum nu = wfinder_born_mu.constituentNeutrinos()[0]; if (wfinder_born_mu.mT() > 40*GeV && nu.pT() > 25*GeV) { keepevent = true; lepton_charge = wfinder_born_mu.constituentLeptons()[0].charge(); lepton_eta = fabs(wfinder_born_mu.constituentLeptons()[0].pseudorapidity()); } } if (!keepevent) { MSG_DEBUG("Event does not pass mT and 
MET cuts"); vetoEvent; } if (lepton_charge > 0) { _hist_wplus_winc->fill(10., weight); _hist_wplus_winc->fill(16., weight); _hist_wplus_winc->fill(30., weight); _hist_wplus_winc->fill(60., weight); _hist_w_inc->fill(+1, weight); } else if (lepton_charge < 0) { _hist_wminus_winc->fill(10., weight); _hist_wminus_winc->fill(16., weight); _hist_wminus_winc->fill(30., weight); _hist_wminus_winc->fill(60., weight); _hist_w_inc->fill(-1, weight); } // Find hadrons in the event - const UnstableFinalState& fs = apply(event, "hadrons"); + const UnstableParticles& fs = apply(event, "hadrons"); /// FIND Different channels // 1: wcjet // get jets const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT>25.0*GeV && Cuts::abseta<2.5); // loop over jets to select jets used to match to charm Jets js; int matched_charmHadron = 0; double charm_charge = 0.; int njets = 0; int nj = 0; bool mat_jet = false; double ptcharm = 0; if (matched_charmHadron > -1) { for (const Jet& j : jets) { mat_jet = false; njets += 1; for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! 
const GenParticle* part = p.genParticle(); if (p.hasCharm()) { //if(isFromBDecay(p)) continue; if (p.fromBottom()) continue; if (p.pT() < 5*GeV ) continue; if (hasCharmedChildren(part)) continue; if (deltaR(p, j) < 0.3) { mat_jet = true; if (p.pT() > ptcharm) { charm_charge = part->pdg_id(); ptcharm = p.pT(); } } } } if (mat_jet) nj++; } if (charm_charge * lepton_charge > 0) charge_weight = -1; else charge_weight = +1; if (nj == 1) { if (lepton_charge > 0) { _hist_wcjet_charge ->fill( 1, weight*charge_weight); _hist_wcjet_plus ->fill( 0, weight*charge_weight); _hist_wplus_wcjet_eta_lep ->fill(lepton_eta, weight*charge_weight); _hist_wcjet_jets_plus ->fill(njets-1 , weight*charge_weight); } else if (lepton_charge < 0) { _hist_wcjet_charge ->fill( -1, weight*charge_weight); _hist_wcjet_minus ->fill( 0, weight*charge_weight); _hist_wminus_wcjet_eta_lep->fill(lepton_eta, weight*charge_weight); _hist_wcjet_jets_minus ->fill(njets-1 , weight*charge_weight); } _hist_wcjet_jets->fill(njets-1, weight*charge_weight); } } // // 1/2: w+d(*) meson for (const Particle& p : fs.particles()) { /// @todo Avoid touching HepMC! 
const GenParticle* part = p.genParticle(); if (p.pT() < 8*GeV) continue; if (fabs(p.eta()) > 2.2) continue; // W+D if (abs(part->pdg_id()) == 411) { if (lepton_charge * part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; // fill histos if (lepton_charge > 0) { _hist_wd_charge ->fill( 1, weight*charge_weight); _hist_wd_plus ->fill( 0, weight*charge_weight); _hist_wplus_wdminus_eta_lep->fill(lepton_eta, weight*charge_weight); _hist_wplusd_wplusinc_pt ->fill( p.pT(), weight*charge_weight); } else if (lepton_charge < 0) { _hist_wd_charge ->fill( -1, weight*charge_weight); _hist_wd_minus ->fill( 0, weight*charge_weight); _hist_wminus_wdplus_eta_lep->fill(lepton_eta, weight*charge_weight); _hist_wminusd_wminusinc_pt ->fill(p.pT() , weight*charge_weight); } } // W+Dstar if ( abs(part->pdg_id()) == 413 ) { if (lepton_charge*part->pdg_id() > 0) charge_weight = -1; else charge_weight = +1; if (lepton_charge > 0) { _hist_wdstar_charge->fill(+1, weight*charge_weight); _hist_wd_plus->fill( 0, weight*charge_weight); _hist_wplus_wdstar_eta_lep->fill( lepton_eta, weight*charge_weight); _hist_wplusdstar_wplusinc_pt->fill( p.pT(), weight*charge_weight); } else if (lepton_charge < 0) { _hist_wdstar_charge->fill(-1, weight*charge_weight); _hist_wd_minus->fill(0, weight*charge_weight); _hist_wminus_wdstar_eta_lep->fill(lepton_eta, weight*charge_weight); _hist_wminusdstar_wminusinc_pt->fill(p.pT(), weight*charge_weight); } } } } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSection() / sumOfWeights(); // norm to cross section // d01 scale(_hist_wcjet_charge, sf); scale(_hist_wd_charge, sf); scale(_hist_wdstar_charge, sf); //d02 scale(_hist_wcjet_plus, sf); scale(_hist_wcjet_minus, sf); scale(_hist_wd_plus, sf); scale(_hist_wd_minus, sf); divide(_hist_wcjet_plus, _hist_wcjet_minus, _hist_wcjet_ratio); divide(_hist_wd_plus, _hist_wd_minus, _hist_wd_ratio ); //d03 scale(_hist_wplus_wcjet_eta_lep, sf); scale(_hist_wminus_wcjet_eta_lep, 
sf); //d04 scale(_hist_wplus_wdminus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wminus_wdplus_eta_lep, crossSection()/sumOfWeights()); scale(_hist_wplus_wdstar_eta_lep , crossSection()/sumOfWeights()); scale(_hist_wminus_wdstar_eta_lep, crossSection()/sumOfWeights()); //d05 scale(_hist_w_inc, 0.01 * sf); // in percent --> /100 divide(_hist_wd_charge, _hist_w_inc, _hist_wd_winc_ratio ); divide(_hist_wdstar_charge, _hist_w_inc, _hist_wdstar_winc_ratio); //d06, in percentage! scale(_hist_wplusd_wplusinc_pt, sf); scale(_hist_wminusd_wminusinc_pt, sf); scale(_hist_wplusdstar_wplusinc_pt, sf); scale(_hist_wminusdstar_wminusinc_pt, sf); scale(_hist_wplus_winc, 0.01 * sf); // in percent --> /100 scale(_hist_wminus_winc, 0.01 * sf); // in percent --> /100 divide(_hist_wplusd_wplusinc_pt, _hist_wplus_winc , _hist_wplusd_wplusinc_pt_ratio ); divide(_hist_wminusd_wminusinc_pt, _hist_wminus_winc, _hist_wminusd_wminusinc_pt_ratio ); divide(_hist_wplusdstar_wplusinc_pt, _hist_wplus_winc , _hist_wplusdstar_wplusinc_pt_ratio ); divide(_hist_wminusdstar_wminusinc_pt, _hist_wminus_winc, _hist_wminusdstar_wminusinc_pt_ratio); //d07 scale(_hist_wcjet_jets, sf); //d08 scale(_hist_wcjet_jets_minus, sf); scale(_hist_wcjet_jets_plus, sf); divide(_hist_wcjet_jets_plus, _hist_wcjet_jets_minus , _hist_wcjet_jets_ratio); } //@} private: // Data members like post-cuts event weight counters go here // Check whether particle comes from b-decay /// @todo Use built-in method and avoid HepMC bool isFromBDecay(const Particle& p) { bool isfromB = false; if (p.genParticle() == nullptr) return false; const GenParticle* part = p.genParticle(); const GenVertex* ivtx = const_cast(part->production_vertex()); while (ivtx) { if (ivtx->particles_in_size() < 1) { isfromB = false; break; } const HepMC::GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin(); part = (*iPart_invtx); if (!part) { isfromB = false; break; } isfromB = PID::hasBottom(part->pdg_id()); if (isfromB 
== true) break; ivtx = const_cast(part->production_vertex()); if ( part->pdg_id() == 2212 || !ivtx ) break; // reached beam } return isfromB; } // Check whether particle has charmed children /// @todo Use built-in method and avoid HepMC! bool hasCharmedChildren(const GenParticle *part) { bool hasCharmedChild = false; if (part == nullptr) return false; const GenVertex* ivtx = const_cast(part->end_vertex()); if (ivtx == nullptr) return false; // if (ivtx->particles_out_size() < 2) return false; HepMC::GenVertex::particles_out_const_iterator iPart_invtx = ivtx->particles_out_const_begin(); HepMC::GenVertex::particles_out_const_iterator end_invtx = ivtx->particles_out_const_end(); for ( ; iPart_invtx != end_invtx; iPart_invtx++ ) { const GenParticle* p2 = (*iPart_invtx); if (p2 == part) continue; hasCharmedChild = PID::hasCharm(p2->pdg_id()); if (hasCharmedChild == true) break; hasCharmedChild = hasCharmedChildren(p2); if (hasCharmedChild == true) break; } return hasCharmedChild; } private: /// @name Histograms //@{ //d01-x01- Histo1DPtr _hist_wcjet_charge; Histo1DPtr _hist_wd_charge; Histo1DPtr _hist_wdstar_charge; //d02-x01- Scatter2DPtr _hist_wcjet_ratio; Scatter2DPtr _hist_wd_ratio; Histo1DPtr _hist_wcjet_plus; Histo1DPtr _hist_wd_plus; Histo1DPtr _hist_wcjet_minus; Histo1DPtr _hist_wd_minus; //d03-x01- Histo1DPtr _hist_wplus_wcjet_eta_lep; Histo1DPtr _hist_wminus_wcjet_eta_lep; //d04-x01- Histo1DPtr _hist_wplus_wdminus_eta_lep; Histo1DPtr _hist_wminus_wdplus_eta_lep; //d05-x01- Histo1DPtr _hist_wplus_wdstar_eta_lep; Histo1DPtr _hist_wminus_wdstar_eta_lep; // postprocessing histos //d05-x01 Histo1DPtr _hist_w_inc; Scatter2DPtr _hist_wd_winc_ratio; Scatter2DPtr _hist_wdstar_winc_ratio; //d06-x01 Histo1DPtr _hist_wplus_winc; Histo1DPtr _hist_wminus_winc; Scatter2DPtr _hist_wplusd_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusd_wminusinc_pt_ratio; Scatter2DPtr _hist_wplusdstar_wplusinc_pt_ratio; Scatter2DPtr _hist_wminusdstar_wminusinc_pt_ratio; Histo1DPtr 
_hist_wplusd_wplusinc_pt ; Histo1DPtr _hist_wminusd_wminusinc_pt; Histo1DPtr _hist_wplusdstar_wplusinc_pt; Histo1DPtr _hist_wminusdstar_wminusinc_pt; // d07-x01 Histo1DPtr _hist_wcjet_jets ; //d08-x01 Scatter2DPtr _hist_wcjet_jets_ratio ; Histo1DPtr _hist_wcjet_jets_plus ; Histo1DPtr _hist_wcjet_jets_minus; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1282447); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1327229.cc b/analyses/pluginATLAS/ATLAS_2014_I1327229.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1327229.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1327229.cc @@ -1,1330 +1,1330 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2014_I1327229 : public Analysis { public: /// Constructor ATLAS_2014_I1327229() : Analysis("ATLAS_2014_I1327229") { } /// Book histograms and initialise projections before the run void init() { // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off _use_fiducial_lepton_efficiency = true; // Random numbers for simulation of ATLAS detector reconstruction efficiency /// @todo Replace with SmearedParticles etc. 
srand(160385); // Read in all signal regions _signal_regions = getSignalRegions(); // Set number of events per signal region to 0 for (size_t i = 0; i < _signal_regions.size(); i++) _eventCountsPerSR[_signal_regions[i]] = 0.0; // Final state including all charged and neutral particles const FinalState fs(-5.0, 5.0, 1*GeV); declare(fs, "FS"); // Final state including all charged particles declare(ChargedFinalState(-2.5, 2.5, 1*GeV), "CFS"); // Final state including all visible particles (to calculate MET, Jets etc.) declare(VisibleFinalState(-5.0,5.0),"VFS"); // Final state including all AntiKt 04 Jets VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // Final state including all unstable particles (including taus) - declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV),"UFS"); + declare(UnstableParticles(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV),"UFS"); // Final state including all electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Final state including all muons IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); /// Book histograms: _h_HTlep_all = bookHisto1D("HTlep_all", 30,0,3000); _h_HTjets_all = bookHisto1D("HTjets_all", 30,0,3000); _h_MET_all = bookHisto1D("MET_all", 30,0,1500); _h_Meff_all = bookHisto1D("Meff_all", 50,0,5000); _h_min_pT_all = bookHisto1D("min_pT_all", 50, 0, 2000); _h_mT_all = bookHisto1D("mT_all", 50, 0, 2000); _h_e_n = bookHisto1D("e_n", 10, -0.5, 9.5); _h_mu_n = bookHisto1D("mu_n", 10, -0.5, 9.5); _h_tau_n = bookHisto1D("tau_n", 10, -0.5, 9.5); _h_pt_1_3l = bookHisto1D("pt_1_3l", 100, 0, 2000); _h_pt_2_3l = bookHisto1D("pt_2_3l", 100, 0, 2000); _h_pt_3_3l = bookHisto1D("pt_3_3l", 100, 0, 2000); _h_pt_1_2ltau = bookHisto1D("pt_1_2ltau", 100, 0, 2000); _h_pt_2_2ltau = bookHisto1D("pt_2_2ltau", 100, 0, 
2000); _h_pt_3_2ltau = bookHisto1D("pt_3_2ltau", 100, 0, 2000); _h_excluded = bookHisto1D("excluded", 2, -0.5, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { // Muons Particles muon_candidates; const Particles charged_tracks = apply(event, "CFS").particles(); const Particles visible_particles = apply(event, "VFS").particles(); for (const Particle& mu : apply(event, "muons").particlesByPt() ) { // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself) double pTinCone = -mu.pT(); for (const Particle& track : charged_tracks ) { if (deltaR(mu.momentum(),track.momentum()) < 0.3 ) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(),visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reconstruction int muon_id = 13; if (mu.hasAncestor(PID::TAU) || mu.hasAncestor(-PID::TAU)) muon_id = 14; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(muon_id,mu) : 1.0; const bool keep_muon = rand()/static_cast(RAND_MAX)<=eff; // Keep muon if pTCone30/pT < 0.15 and eTCone30/pT < 0.2 and reconstructed if (keep_muon && pTinCone/mu.pT() <= 0.1 && eTinCone/mu.pT() < 0.1) muon_candidates.push_back(mu); } // Electrons Particles electron_candidates; for (const Particle& e : apply(event, "elecs").particlesByPt() ) { // Neglect electrons in crack regions if (inRange(e.abseta(), 1.37, 1.52)) continue; // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself) double pTinCone = -e.pT(); for (const Particle& track : charged_tracks) { if (deltaR(e.momentum(), track.momentum()) < 0.3 ) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles (except muons) within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(),visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reconstruction int elec_id = 11; if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(elec_id,e) : 1.0; const bool keep_elec = rand()/static_cast(RAND_MAX)<=eff; // Keep electron if pTCone30/pT < 0.13 and eTCone30/pT < 0.2 and reconstructed if (keep_elec && pTinCone/e.pT() <= 0.1 && eTinCone/e.pT() < 0.1) electron_candidates.push_back(e); } // Taus Particles tau_candidates; - for (const Particle& tau : apply(event, "UFS").particles() ) { + for (const Particle& tau : apply(event, "UFS").particles() ) { // Only pick taus out of all unstable particles if ( tau.abspid() != PID::TAU) continue; // Check that tau has decayed into daughter particles if (tau.genParticle()->end_vertex() == 0) continue; // Calculate visible tau momentum using the tau neutrino momentum in the tau decay FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_momentum(tau); Particle tau_vis = tau; tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum); // keep only taus in certain eta region and above 15 GeV of visible tau pT if ( tau_vis.pT()/GeV <= 15.0 || tau_vis.abseta() > 2.5) continue; // Get prong number (number of tracks) in tau decay and check if tau decays leptonically unsigned int nprong = 0; bool lep_decaying_tau = false; get_prong_number(tau.genParticle(),nprong,lep_decaying_tau); // Apply reconstruction efficiency and simulate reconstruction int tau_id = 15; if (nprong == 1) tau_id = 15; else if (nprong == 3) tau_id = 16; const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(tau_id,tau_vis) : 1.0; const bool keep_tau = rand()/static_cast(RAND_MAX)<=eff; // Keep tau if nprong = 1, it decays hadronically and it is reconstructed if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis); } // Jets (all anti-kt R=0.4 jets with pT > 30 GeV and eta < 4.9 Jets jet_candidates; for (const Jet& jet : apply(event, "AntiKtJets04").jetsByPt(30.0*GeV) ) { if (jet.abseta() < 4.9 ) jet_candidates.push_back(jet); } // ETmiss Particles vfs_particles = apply(event, "VFS").particles(); FourMomentum pTmiss; for (const Particle& p : vfs_particles) pTmiss -= p.momentum(); double eTmiss = pTmiss.pT()/GeV; // ------------------------- // Overlap removal // electron - electron Particles electron_candidates_2; for(size_t ie = 0; ie < electron_candidates.size(); ++ie) { const Particle& e = electron_candidates[ie]; bool away = true; // If electron pair within dR < 0.1: remove electron with lower pT for(size_t ie2 = 0; ie2 < electron_candidates_2.size(); ++ie2) { if (deltaR(e.momentum(),electron_candidates_2[ie2].momentum()) < 0.1 ) { away = false; break; } } // If isolated keep it if ( away ) electron_candidates_2.push_back( e ); } // jet - electron Jets recon_jets; for (const Jet& jet : jet_candidates) { bool away = true; // If jet within dR < 0.2 of electron: remove jet for (const Particle& e : electron_candidates_2) { if (deltaR(e.momentum(), jet.momentum()) < 0.2 ) { away = false; break; } } // jet - tau if ( away ) { // If jet within dR < 0.2 of tau: remove jet for (const Particle& tau : tau_candidates) { if (deltaR(tau.momentum(), jet.momentum()) < 0.2 ) { away = false; break; } } } // If isolated keep it if ( away ) recon_jets.push_back( jet ); } // electron - jet Particles recon_leptons, recon_e; for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) { const Particle& e = electron_candidates_2[ie]; // If electron within 0.2 < dR < 0.4 from any jets: remove electron bool away = true; for (const Jet& jet : 
recon_jets) { if (deltaR(e.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } // electron - muon // If electron within dR < 0.1 of a muon: remove electron if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(mu.momentum(),e.momentum()) < 0.1) { away = false; break; } } } // If isolated keep it if ( away ) { recon_e.push_back( e ); recon_leptons.push_back( e ); } } // tau - electron Particles recon_tau; for (const Particle& tau : tau_candidates) { bool away = true; // If tau within dR < 0.2 of an electron: remove tau for (const Particle & e : recon_e) { if (deltaR(tau.momentum(),e.momentum()) < 0.2 ) { away = false; break; } } // tau - muon // If tau within dR < 0.2 of a muon: remove tau if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(tau.momentum(), mu.momentum()) < 0.2 ) { away = false; break; } } } // If isolated keep it if (away) recon_tau.push_back( tau ); } // muon - jet Particles recon_mu, trigger_mu; // If muon within dR < 0.4 of a jet: remove muon for (const Particle& mu : muon_candidates ) { bool away = true; for (const Jet& jet : recon_jets) { if (deltaR(mu.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } if (away) { recon_mu.push_back( mu ); recon_leptons.push_back( mu ); if (mu.abseta() < 2.4) trigger_mu.push_back( mu ); } } // End overlap removal // --------------------- // Jet cleaning if (rand()/static_cast(RAND_MAX) <= 0.42) { for (const Jet& jet : recon_jets ) { const double eta = jet.rapidity(); const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if(jet.pT() > 25*GeV && inRange(eta,-0.1,1.5) && inRange(phi,-0.9,-0.5)) vetoEvent; } } // Event selection // Require at least 3 charged tracks in event if (charged_tracks.size() < 3) vetoEvent; // And at least one e/mu passing trigger if( !( !recon_e.empty() && recon_e[0].pT()>26.*GeV) && !( !trigger_mu.empty() && trigger_mu[0].pT()>26.*GeV) ) { MSG_DEBUG("Hardest lepton fails trigger"); vetoEvent; } // And only accept events with at least 2 
electrons and muons and at least 3 leptons in total if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent; // Getting the event weight const double weight = event.weight(); // Sort leptons by decreasing pT sortByPt(recon_leptons); sortByPt(recon_tau); // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons double HTlep = 0.; Particles chosen_leptons; if (recon_leptons.size() > 2) { _h_pt_1_3l->fill(recon_leptons[0].pT()/GeV, weight); _h_pt_2_3l->fill(recon_leptons[1].pT()/GeV, weight); _h_pt_3_3l->fill(recon_leptons[2].pT()/GeV, weight); HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_leptons[2] ); } else { _h_pt_1_2ltau->fill(recon_leptons[0].pT()/GeV, weight); _h_pt_2_2ltau->fill(recon_leptons[1].pT()/GeV, weight); _h_pt_3_2ltau->fill(recon_tau[0].pT()/GeV, weight); HTlep = recon_leptons[0].pT()/GeV + recon_leptons[1].pT()/GeV + recon_tau[0].pT()/GeV; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_tau[0] ); } // Calculate mT and mTW variable Particles mT_leptons; Particles mTW_leptons; for (size_t i1 = 0; i1 < 3; i1 ++) { for (size_t i2 = i1+1; i2 < 3; i2 ++) { double OSSF_inv_mass = isOSSF_mass(chosen_leptons[i1],chosen_leptons[i2]); if (OSSF_inv_mass != 0.) { for (size_t i3 = 0; i3 < 3 ; i3 ++) { if (i3 != i2 && i3 != i1) { mT_leptons.push_back(chosen_leptons[i3]); if ( fabs(91.0 - OSSF_inv_mass) < 20. 
) mTW_leptons.push_back(chosen_leptons[i3]); } } } else { mT_leptons.push_back(chosen_leptons[0]); mTW_leptons.push_back(chosen_leptons[0]); } } } sortByPt(mT_leptons); sortByPt(mTW_leptons); double mT = sqrt(2*pTmiss.pT()/GeV*mT_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mT_leptons[0].phi()))); double mTW = sqrt(2*pTmiss.pT()/GeV*mTW_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mTW_leptons[0].phi()))); // Calculate Min pT variable double min_pT = chosen_leptons[2].pT()/GeV; // Number of prompt e/mu and had taus _h_e_n->fill(recon_e.size(),weight); _h_mu_n->fill(recon_mu.size(),weight); _h_tau_n->fill(recon_tau.size(),weight); // Calculate HTjets variable double HTjets = 0.; for (const Jet& jet : recon_jets) HTjets += jet.pT()/GeV; // Calculate meff variable double meff = eTmiss + HTjets; Particles all_leptons; for (const Particle& e : recon_e ) { meff += e.pT()/GeV; all_leptons.push_back( e ); } for (const Particle& mu : recon_mu) { meff += mu.pT()/GeV; all_leptons.push_back( mu ); } for (const Particle& tau : recon_tau) { meff += tau.pT()/GeV; all_leptons.push_back( tau ); } // Fill histograms of kinematic variables _h_HTlep_all->fill(HTlep,weight); _h_HTjets_all->fill(HTjets,weight); _h_MET_all->fill(eTmiss,weight); _h_Meff_all->fill(meff,weight); _h_min_pT_all->fill(min_pT,weight); _h_mT_all->fill(mT,weight); // Determine signal region (3l / 2ltau , onZ / offZ OSSF / offZ no-OSSF) // 3l vs. 
2ltau string basic_signal_region; if (recon_mu.size() + recon_e.size() > 2) basic_signal_region += "3l_"; else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0)) basic_signal_region += "2ltau_"; // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass int onZ = isonZ(chosen_leptons); if (onZ == 1) basic_signal_region += "onZ"; else if (onZ == 0) { bool OSSF = isOSSF(chosen_leptons); if (OSSF) basic_signal_region += "offZ_OSSF"; else basic_signal_region += "offZ_noOSSF"; } // Check in which signal regions this event falls and adjust event counters // INFO: The b-jet signal regions of the paper are not included in this Rivet implementation fillEventCountsPerSR(basic_signal_region,onZ,HTlep,eTmiss,HTjets,meff,min_pT,mTW,weight); } /// Normalise histograms etc., after the run void finalize() { // Normalize to an integrated luminosity of 1 fb-1 double norm = crossSection()/femtobarn/sumOfWeights(); string best_signal_region = ""; double ratio_best_SR = 0.; // Loop over all signal regions and find signal region with best sensitivity (ratio signal events/visible cross-section) for (size_t i = 0; i < _signal_regions.size(); i++) { double signal_events = _eventCountsPerSR[_signal_regions[i]] * norm; // Use expected upper limits to find best signal region: double UL95 = getUpperLimit(_signal_regions[i],false); double ratio = signal_events / UL95; if (ratio > ratio_best_SR) { best_signal_region = _signal_regions.at(i); ratio_best_SR = ratio; } } double signal_events_best_SR = _eventCountsPerSR[best_signal_region] * norm; double exp_UL_best_SR = getUpperLimit(best_signal_region, false); double obs_UL_best_SR = getUpperLimit(best_signal_region, true); // Print out result cout << "----------------------------------------------------------------------------------------" << endl; cout << "Number of total events: " << sumOfWeights() << endl; cout << "Best signal region: " << best_signal_region << endl; cout << 
"Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << endl; cout << "Efficiency*Acceptance: " << _eventCountsPerSR[best_signal_region]/sumOfWeights() << endl; cout << "Cross-section [fb]: " << crossSection()/femtobarn << endl; cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << endl; cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << endl; cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << endl; cout << "Ratio (signal events / observed visible cross-section): " << signal_events_best_SR/obs_UL_best_SR << endl; cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > exp_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > obs_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; cout << "INFO: The b-jet signal regions of the paper are not included in this Rivet implementation." 
<< endl; cout << "----------------------------------------------------------------------------------------" << endl; /// Normalize to cross section if (norm != 0) { scale(_h_HTlep_all, norm); scale(_h_HTjets_all, norm); scale(_h_MET_all, norm); scale(_h_Meff_all, norm); scale(_h_min_pT_all, norm); scale(_h_mT_all, norm); scale(_h_pt_1_3l, norm); scale(_h_pt_2_3l, norm); scale(_h_pt_3_3l, norm); scale(_h_pt_1_2ltau, norm); scale(_h_pt_2_2ltau, norm); scale(_h_pt_3_2ltau, norm); scale(_h_e_n, norm); scale(_h_mu_n, norm); scale(_h_tau_n, norm); scale(_h_excluded, norm); } } /// Helper functions //@{ /// Function giving a list of all signal regions vector getSignalRegions() { // List of basic signal regions vector basic_signal_regions; basic_signal_regions.push_back("3l_offZ_OSSF"); basic_signal_regions.push_back("3l_offZ_noOSSF"); basic_signal_regions.push_back("3l_onZ"); basic_signal_regions.push_back("2ltau_offZ_OSSF"); basic_signal_regions.push_back("2ltau_offZ_noOSSF"); basic_signal_regions.push_back("2ltau_onZ"); // List of kinematic variables vector kinematic_variables; kinematic_variables.push_back("HTlep"); kinematic_variables.push_back("METStrong"); kinematic_variables.push_back("METWeak"); kinematic_variables.push_back("Meff"); kinematic_variables.push_back("MeffStrong"); kinematic_variables.push_back("MeffMt"); kinematic_variables.push_back("MinPt"); vector signal_regions; // Loop over all kinematic variables and basic signal regions for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++) { for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++) { // Is signal region onZ? int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ? 
1 : 0; // Get cut values for this kinematic variable vector cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ); // Loop over all cut values for (size_t i2 = 0; i2 < cut_values.size(); i2++) { // Push signal region into vector signal_regions.push_back( kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(cut_values[i2]) ); } } } return signal_regions; } /// Function giving all cut values per kinematic variable vector getCutsPerSignalRegion(const string& signal_region, int onZ = 0) { vector cutValues; // Cut values for HTlep if (signal_region.compare("HTlep") == 0) { cutValues.push_back(0); cutValues.push_back(200); cutValues.push_back(500); cutValues.push_back(800); } // Cut values for MinPt else if (signal_region.compare("MinPt") == 0) { cutValues.push_back(0); cutValues.push_back(50); cutValues.push_back(100); cutValues.push_back(150); } // Cut values for METStrong (HTjets > 150 GeV) and METWeak (HTjets < 150 GeV) else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0) { cutValues.push_back(0); cutValues.push_back(100); cutValues.push_back(200); cutValues.push_back(300); } // Cut values for Meff if (signal_region.compare("Meff") == 0) { cutValues.push_back(0); cutValues.push_back(600); cutValues.push_back(1000); cutValues.push_back(1500); } // Cut values for MeffStrong (MET > 100 GeV) if ((signal_region.compare("MeffStrong") == 0 || signal_region.compare("MeffMt") == 0) && onZ ==1) { cutValues.push_back(0); cutValues.push_back(600); cutValues.push_back(1200); } return cutValues; } /// function fills map _eventCountsPerSR by looping over all signal regions /// and looking if the event falls into this signal region void fillEventCountsPerSR(const string& basic_signal_region, int onZ, double HTlep, double eTmiss, double HTjets, double meff, double min_pT, double mTW, double weight) { // Get cut values for HTlep, loop over them and add event if cut is passed vector cut_values = 
getCutsPerSignalRegion("HTlep", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (HTlep > cut_values[i]) _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for MinPt, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MinPt", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (min_pT > cut_values[i]) _eventCountsPerSR[("MinPt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for METStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets > 150.) _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for METWeak, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METWeak", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets <= 150.) _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for Meff, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("Meff", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i]) _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for MeffStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && eTmiss > 100.) 
_eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } // Get cut values for MeffMt, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffMt", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && mTW > 100. && onZ == 1) _eventCountsPerSR[("MeffMt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight; } } /// Function returning 4-momentum of daughter-particle if it is a tau neutrino FourMomentum get_tau_neutrino_momentum(const Particle& p) { assert(p.abspid() == PID::TAU); const GenVertex* dv = p.genParticle()->end_vertex(); assert(dv != NULL); // Loop over all daughter particles for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { if (abs((*pp)->pdg_id()) == PID::NU_TAU) return FourMomentum((*pp)->momentum()); } return FourMomentum(); } /// Function calculating the prong number of taus void get_prong_number(const GenParticle* p, unsigned int& nprong, bool& lep_decaying_tau) { assert(p != NULL); const GenVertex* dv = p->end_vertex(); assert(dv != NULL); for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { // If they have status 1 and are charged they will produce a track and the prong number is +1 if ((*pp)->status() == 1 ) { const int id = (*pp)->pdg_id(); if (Rivet::PID::charge(id) != 0 ) ++nprong; // Check if tau decays leptonically if (( abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU ) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true; } // If the status of the daughter particle is 2 it is unstable and the further decays are checked else if ((*pp)->status() == 2 ) { get_prong_number((*pp),nprong,lep_decaying_tau); } } } /// Function giving fiducial lepton efficiency double apply_reco_eff(int flavor, const Particle& p) { double pt = p.pT()/GeV; double 
eta = p.eta(); double eff = 0.; if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff. double avgrate = 0.685; const static double wz_ele[] = {0.0256,0.522,0.607,0.654,0.708,0.737,0.761,0.784,0.815,0.835,0.851,0.841,0.898}; // double ewz_ele[] = {0.000257,0.00492,0.00524,0.00519,0.00396,0.00449,0.00538,0.00513,0.00773,0.00753,0.0209,0.0964,0.259}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100 && pt < 200) ibin = 9; if(pt > 200 && pt < 400) ibin = 10; if(pt > 400 && pt < 600) ibin = 11; if(pt > 600) ibin = 12; double eff_pt = 0.; eff_pt = wz_ele[ibin]; eta = fabs(eta); const static double wz_ele_eta[] = {0.65,0.714,0.722,0.689,0.635,0.615}; // double ewz_ele_eta[] = {0.00642,0.00355,0.00335,0.004,0.00368,0.00422}; ibin = 0; if(eta > 0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_ele_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } if (flavor == 12) { // weight electron from tau double avgrate = 0.476; const static double wz_ele[] = {0.00855,0.409,0.442,0.55,0.632,0.616,0.615,0.642,0.72,0.617}; // double ewz_ele[] = {0.000573,0.0291,0.0366,0.0352,0.0363,0.0474,0.0628,0.0709,0.125,0.109}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; double eff_pt = 0.; eff_pt = wz_ele[ibin]; eta = 
fabs(eta); const static double wz_ele_eta[] = {0.546,0.5,0.513,0.421,0.47,0.433}; //double ewz_ele_eta[] = {0.0566,0.0257,0.0263,0.0263,0.0303,0.0321}; ibin = 0; if(eta > 0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_ele_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } if (flavor == 13) { // weight prompt muon int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100 && pt < 200) ibin = 9; if(pt > 200 && pt < 400) ibin = 10; if(pt > 400) ibin = 11; if(fabs(eta) < 0.1) { const static double wz_mu[] = {0.00705,0.402,0.478,0.49,0.492,0.499,0.527,0.512,0.53,0.528,0.465,0.465}; //double ewz_mu[] = {0.000298,0.0154,0.017,0.0158,0.0114,0.0123,0.0155,0.0133,0.0196,0.0182,0.0414,0.0414}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } if(fabs(eta) > 0.1) { const static double wz_mu[] = {0.0224,0.839,0.887,0.91,0.919,0.923,0.925,0.925,0.922,0.918,0.884,0.834}; //double ewz_mu[] = {0.000213,0.00753,0.0074,0.007,0.00496,0.00534,0.00632,0.00583,0.00849,0.00804,0.0224,0.0963}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } } if (flavor == 14) { // weight muon from tau int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; if(fabs(eta) < 0.1) { const static double wz_mu[] = 
{0.0,0.664,0.124,0.133,0.527,0.283,0.495,0.25,0.5,0.331}; //double ewz_mu[] = {0.0,0.192,0.0437,0.0343,0.128,0.107,0.202,0.125,0.25,0.191}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } if(fabs(eta) > 0.1) { const static double wz_mu[] = {0.0,0.617,0.655,0.676,0.705,0.738,0.712,0.783,0.646,0.745}; //double ewz_mu[] = {0.0,0.043,0.0564,0.0448,0.0405,0.0576,0.065,0.0825,0.102,0.132}; double eff_pt = 0.; eff_pt = wz_mu[ibin]; eff = eff_pt; } } if (flavor == 15) { // weight hadronic tau 1p double avgrate = 0.16; const static double wz_tau1p[] = {0.0,0.0311,0.148,0.229,0.217,0.292,0.245,0.307,0.227,0.277}; //double ewz_tau1p[] = {0.0,0.00211,0.0117,0.0179,0.0134,0.0248,0.0264,0.0322,0.0331,0.0427}; int ibin = 0; if(pt > 10 && pt < 15) ibin = 0; if(pt > 15 && pt < 20) ibin = 1; if(pt > 20 && pt < 25) ibin = 2; if(pt > 25 && pt < 30) ibin = 3; if(pt > 30 && pt < 40) ibin = 4; if(pt > 40 && pt < 50) ibin = 5; if(pt > 50 && pt < 60) ibin = 6; if(pt > 60 && pt < 80) ibin = 7; if(pt > 80 && pt < 100) ibin = 8; if(pt > 100) ibin = 9; double eff_pt = 0.; eff_pt = wz_tau1p[ibin]; const static double wz_tau1p_eta[] = {0.166,0.15,0.188,0.175,0.142,0.109}; //double ewz_tau1p_eta[] ={0.0166,0.00853,0.0097,0.00985,0.00949,0.00842}; ibin = 0; if(eta > 0.0 && eta < 0.1) ibin = 0; if(eta > 0.1 && eta < 0.5) ibin = 1; if(eta > 0.5 && eta < 1.0) ibin = 2; if(eta > 1.0 && eta < 1.5) ibin = 3; if(eta > 1.5 && eta < 2.0) ibin = 4; if(eta > 2.0 && eta < 2.5) ibin = 5; double eff_eta = 0.; eff_eta = wz_tau1p_eta[ibin]; eff = (eff_pt * eff_eta) / avgrate; } return eff; } /// Function giving observed and expected upper limits (on the visible cross-section) double getUpperLimit(const string& signal_region, bool observed) { map upperLimitsObserved; map upperLimitsExpected; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_200"] = 0.704; upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_500"] = 0.182; 
upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_800"] = 0.147; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.677; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.141; upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_200"] = 0.341; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_500"] = 0.221; upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.413; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.138; upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.150; upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 3.579; upperLimitsObserved["HTlep_3l_onZ_cut_500"] = 0.466; upperLimitsObserved["HTlep_3l_onZ_cut_800"] = 0.298; upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 3.141; upperLimitsObserved["HTlep_2ltau_onZ_cut_500"] = 0.290; upperLimitsObserved["HTlep_2ltau_onZ_cut_800"] = 0.157; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_0"] = 1.111; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_100"] = 0.354; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_200"] = 0.236; upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_300"] = 0.150; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_0"] = 1.881; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.406; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.194; upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.134; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_0"] = 0.770; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_100"] = 0.295; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_200"] = 0.149; upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_300"] = 
0.140; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_0"] = 2.003; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.806; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.227; upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.138; upperLimitsObserved["METStrong_3l_onZ_cut_0"] = 6.383; upperLimitsObserved["METStrong_3l_onZ_cut_100"] = 0.959; upperLimitsObserved["METStrong_3l_onZ_cut_200"] = 0.549; upperLimitsObserved["METStrong_3l_onZ_cut_300"] = 0.182; upperLimitsObserved["METStrong_2ltau_onZ_cut_0"] = 10.658; upperLimitsObserved["METStrong_2ltau_onZ_cut_100"] = 0.637; upperLimitsObserved["METStrong_2ltau_onZ_cut_200"] = 0.291; upperLimitsObserved["METStrong_2ltau_onZ_cut_300"] = 0.227; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_0"] = 1.802; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_100"] = 0.344; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_200"] = 0.189; upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_300"] = 0.148; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.321; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.430; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.137; upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.134; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_0"] = 0.562; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_100"] = 0.153; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_200"] = 0.154; upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_300"] = 0.141; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.475; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.244; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.141; upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.142; upperLimitsObserved["METWeak_3l_onZ_cut_0"] = 24.769; upperLimitsObserved["METWeak_3l_onZ_cut_100"] = 0.690; upperLimitsObserved["METWeak_3l_onZ_cut_200"] = 0.198; upperLimitsObserved["METWeak_3l_onZ_cut_300"] = 0.138; 
upperLimitsObserved["METWeak_2ltau_onZ_cut_0"] = 194.360; upperLimitsObserved["METWeak_2ltau_onZ_cut_100"] = 0.287; upperLimitsObserved["METWeak_2ltau_onZ_cut_200"] = 0.144; upperLimitsObserved["METWeak_2ltau_onZ_cut_300"] = 0.130; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_600"] = 0.487; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1000"] = 0.156; upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1500"] = 0.140; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_600"] = 0.687; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.224; upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.155; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_600"] = 0.249; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1000"] = 0.194; upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1500"] = 0.145; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.772; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.218; upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.204; upperLimitsObserved["Meff_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["Meff_3l_onZ_cut_600"] = 2.933; upperLimitsObserved["Meff_3l_onZ_cut_1000"] = 0.912; upperLimitsObserved["Meff_3l_onZ_cut_1500"] = 0.225; upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["Meff_2ltau_onZ_cut_600"] = 1.486; upperLimitsObserved["Meff_2ltau_onZ_cut_1000"] = 0.641; upperLimitsObserved["Meff_2ltau_onZ_cut_1500"] = 0.204; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.479; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.353; upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.187; upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.617; upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.320; 
upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.281; upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.408; upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.240; upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.774; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.417; upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.266; upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 1.208; upperLimitsObserved["MeffStrong_3l_onZ_cut_600"] = 0.837; upperLimitsObserved["MeffStrong_3l_onZ_cut_1200"] = 0.269; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 0.605; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_600"] = 0.420; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_1200"] = 0.141; upperLimitsObserved["MeffMt_3l_onZ_cut_0"] = 1.832; upperLimitsObserved["MeffMt_3l_onZ_cut_600"] = 0.862; upperLimitsObserved["MeffMt_3l_onZ_cut_1200"] = 0.222; upperLimitsObserved["MeffMt_2ltau_onZ_cut_0"] = 1.309; upperLimitsObserved["MeffMt_2ltau_onZ_cut_600"] = 0.481; upperLimitsObserved["MeffMt_2ltau_onZ_cut_1200"] = 0.146; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_50"] = 0.500; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_100"] = 0.203; upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_150"] = 0.128; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.859; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.158; upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_50"] = 0.295; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_100"] = 0.148; upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_150"] = 0.137; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_50"] = 
0.314; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.134; upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.140; upperLimitsObserved["MinPt_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["MinPt_3l_onZ_cut_50"] = 1.767; upperLimitsObserved["MinPt_3l_onZ_cut_100"] = 0.690; upperLimitsObserved["MinPt_3l_onZ_cut_150"] = 0.301; upperLimitsObserved["MinPt_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["MinPt_2ltau_onZ_cut_50"] = 1.050; upperLimitsObserved["MinPt_2ltau_onZ_cut_100"] = 0.155; upperLimitsObserved["MinPt_2ltau_onZ_cut_150"] = 0.146; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_0"] = 2.435; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_1"] = 0.865; upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_2"] = 0.474; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_0"] = 13.901; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.566; upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.426; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_0"] = 1.054; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_1"] = 0.643; upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_2"] = 0.321; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.276; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.435; upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_2"] = 1.073; upperLimitsObserved["nbtag_3l_onZ_cut_0"] = 29.804; upperLimitsObserved["nbtag_3l_onZ_cut_1"] = 3.908; upperLimitsObserved["nbtag_3l_onZ_cut_2"] = 0.704; upperLimitsObserved["nbtag_2ltau_onZ_cut_0"] = 205.091; upperLimitsObserved["nbtag_2ltau_onZ_cut_1"] = 9.377; upperLimitsObserved["nbtag_2ltau_onZ_cut_2"] = 0.657; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_200"] = 1.175; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_500"] = 0.265; upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_800"] = 0.155; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.803; 
upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.159; upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_200"] = 0.340; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_500"] = 0.218; upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.599; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.146; upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.148; upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 4.879; upperLimitsExpected["HTlep_3l_onZ_cut_500"] = 0.473; upperLimitsExpected["HTlep_3l_onZ_cut_800"] = 0.266; upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 3.676; upperLimitsExpected["HTlep_2ltau_onZ_cut_500"] = 0.235; upperLimitsExpected["HTlep_2ltau_onZ_cut_800"] = 0.150; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_0"] = 1.196; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_100"] = 0.423; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_200"] = 0.208; upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_300"] = 0.158; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_0"] = 2.158; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.461; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.186; upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.138; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_0"] = 0.495; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_100"] = 0.284; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_200"] = 0.150; upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_300"] = 0.146; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_0"] = 1.967; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.732; 
upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.225; upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.147; upperLimitsExpected["METStrong_3l_onZ_cut_0"] = 7.157; upperLimitsExpected["METStrong_3l_onZ_cut_100"] = 1.342; upperLimitsExpected["METStrong_3l_onZ_cut_200"] = 0.508; upperLimitsExpected["METStrong_3l_onZ_cut_300"] = 0.228; upperLimitsExpected["METStrong_2ltau_onZ_cut_0"] = 12.441; upperLimitsExpected["METStrong_2ltau_onZ_cut_100"] = 0.534; upperLimitsExpected["METStrong_2ltau_onZ_cut_200"] = 0.243; upperLimitsExpected["METStrong_2ltau_onZ_cut_300"] = 0.218; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_0"] = 2.199; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_100"] = 0.391; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_200"] = 0.177; upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_300"] = 0.144; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.431; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.358; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.150; upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.135; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_0"] = 0.577; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_100"] = 0.214; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_200"] = 0.155; upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_300"] = 0.140; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.474; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.382; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.144; upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.146; upperLimitsExpected["METWeak_3l_onZ_cut_0"] = 26.305; upperLimitsExpected["METWeak_3l_onZ_cut_100"] = 1.227; upperLimitsExpected["METWeak_3l_onZ_cut_200"] = 0.311; upperLimitsExpected["METWeak_3l_onZ_cut_300"] = 0.188; upperLimitsExpected["METWeak_2ltau_onZ_cut_0"] = 205.198; upperLimitsExpected["METWeak_2ltau_onZ_cut_100"] = 0.399; upperLimitsExpected["METWeak_2ltau_onZ_cut_200"] = 
0.166; upperLimitsExpected["METWeak_2ltau_onZ_cut_300"] = 0.140; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_600"] = 0.649; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1000"] = 0.252; upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1500"] = 0.150; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_600"] = 0.657; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.226; upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.154; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_600"] = 0.265; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1000"] = 0.176; upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1500"] = 0.146; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.678; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.243; upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.184; upperLimitsExpected["Meff_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["Meff_3l_onZ_cut_600"] = 3.219; upperLimitsExpected["Meff_3l_onZ_cut_1000"] = 0.905; upperLimitsExpected["Meff_3l_onZ_cut_1500"] = 0.261; upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["Meff_2ltau_onZ_cut_600"] = 1.680; upperLimitsExpected["Meff_2ltau_onZ_cut_1000"] = 0.375; upperLimitsExpected["Meff_2ltau_onZ_cut_1500"] = 0.178; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.571; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.386; upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.177; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.605; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.335; upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.249; upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.373; upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.223; 
upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150; upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.873; upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.428; upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.210; upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 2.034; upperLimitsExpected["MeffStrong_3l_onZ_cut_600"] = 1.093; upperLimitsExpected["MeffStrong_3l_onZ_cut_1200"] = 0.293; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 0.690; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_600"] = 0.392; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_1200"] = 0.156; upperLimitsExpected["MeffMt_3l_onZ_cut_0"] = 2.483; upperLimitsExpected["MeffMt_3l_onZ_cut_600"] = 0.845; upperLimitsExpected["MeffMt_3l_onZ_cut_1200"] = 0.255; upperLimitsExpected["MeffMt_2ltau_onZ_cut_0"] = 1.448; upperLimitsExpected["MeffMt_2ltau_onZ_cut_600"] = 0.391; upperLimitsExpected["MeffMt_2ltau_onZ_cut_1200"] = 0.146; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_50"] = 0.703; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_100"] = 0.207; upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_150"] = 0.143; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.705; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.149; upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_50"] = 0.249; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_100"] = 0.135; upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_150"] = 0.136; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_50"] = 0.339; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.149; upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.145; upperLimitsExpected["MinPt_3l_onZ_cut_0"] = 32.181; 
upperLimitsExpected["MinPt_3l_onZ_cut_50"] = 2.260; upperLimitsExpected["MinPt_3l_onZ_cut_100"] = 0.438; upperLimitsExpected["MinPt_3l_onZ_cut_150"] = 0.305; upperLimitsExpected["MinPt_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["MinPt_2ltau_onZ_cut_50"] = 1.335; upperLimitsExpected["MinPt_2ltau_onZ_cut_100"] = 0.162; upperLimitsExpected["MinPt_2ltau_onZ_cut_150"] = 0.149; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_0"] = 2.893; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_1"] = 0.923; upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_2"] = 0.452; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_0"] = 14.293; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.774; upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.549; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_0"] = 0.836; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_1"] = 0.594; upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_2"] = 0.298; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.132; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.358; upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_2"] = 0.958; upperLimitsExpected["nbtag_3l_onZ_cut_0"] = 32.181; upperLimitsExpected["nbtag_3l_onZ_cut_1"] = 3.868; upperLimitsExpected["nbtag_3l_onZ_cut_2"] = 0.887; upperLimitsExpected["nbtag_2ltau_onZ_cut_0"] = 217.801; upperLimitsExpected["nbtag_2ltau_onZ_cut_1"] = 9.397; upperLimitsExpected["nbtag_2ltau_onZ_cut_2"] = 0.787; if (observed) return upperLimitsObserved[signal_region]; else return upperLimitsExpected[signal_region]; } /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass int isonZ (const Particles& particles) { int onZ = 0; double best_mass_2 = 999.; double best_mass_3 = 999.; // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass for (const Particle& p1 : particles) { for (const Particle& p2 : particles) { double mass_difference_2_old = fabs(91.0 - best_mass_2); double 
mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV); // If particle combination is OSSF pair calculate mass difference to Z mass if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169)) { // Get invariant mass closest to Z mass if (mass_difference_2_new < mass_difference_2_old) best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV; // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion) for (const Particle& p3 : particles ) { double mass_difference_3_old = fabs(91.0 - best_mass_3); double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV); if (mass_difference_3_new < mass_difference_3_old) best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV; } } } } // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination double best_mass = min(best_mass_2,best_mass_3); // if this mass is in a 20 GeV window around the Z mass, the event is classified as onZ if ( fabs(91.0 - best_mass) < 20. ) onZ = 1; return onZ; } /// function checking if two leptons are an OSSF lepton pair and giving out the invariant mass (0 if no OSSF pair) double isOSSF_mass (const Particle& p1, const Particle& p2) { double inv_mass = 0.; // Is particle combination OSSF pair? 
if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169)) { // Get invariant mass inv_mass = (p1.momentum() + p2.momentum()).mass()/GeV; } return inv_mass; } /// Function checking if there is an OSSF lepton pair bool isOSSF (const Particles& particles) { for (size_t i1=0 ; i1 < 3 ; i1 ++) { for (size_t i2 = i1+1 ; i2 < 3 ; i2 ++) { if ((particles[i1].pid()*particles[i2].pid() == -121 || particles[i1].pid()*particles[i2].pid() == -169)) { return true; } } } return false; } //@} private: /// Histograms //@{ Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all, _h_min_pT_all, _h_mT_all; Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau; Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n; Histo1DPtr _h_excluded; //@} /// Fiducial efficiencies to model the effects of the ATLAS detector bool _use_fiducial_lepton_efficiency; /// List of signal regions and event counts per signal region vector _signal_regions; map _eventCountsPerSR; }; DECLARE_RIVET_PLUGIN(ATLAS_2014_I1327229); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1394865.cc b/analyses/pluginATLAS/ATLAS_2015_I1394865.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1394865.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1394865.cc @@ -1,270 +1,270 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/InvMassFinalState.hh" namespace Rivet { /// Inclusive 4-lepton lineshape class ATLAS_2015_I1394865 : public Analysis { public: /// Default 
constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1394865); void init() { FinalState fs(Cuts::abseta < 5.0); IdentifiedFinalState photon(fs, PID::PHOTON); IdentifiedFinalState bare_EL(fs, {PID::ELECTRON, -PID::ELECTRON}); IdentifiedFinalState bare_MU(fs, {PID::MUON, -PID::MUON}); // Selection 1: ZZ-> llll selection Cut etaranges_el = Cuts::abseta < 2.5 && Cuts::pT > 7*GeV; Cut etaranges_mu = Cuts::abseta < 2.7 && Cuts::pT > 6*GeV; DressedLeptons electron_sel4l(photon, bare_EL, 0.1, etaranges_el); declare(electron_sel4l, "ELECTRON_sel4l"); DressedLeptons muon_sel4l(photon, bare_MU, 0.1, etaranges_mu); declare(muon_sel4l, "MUON_sel4l"); // Both ZZ on-shell histos _h_ZZ_mZZ = bookHisto1D(1, 1, 1); _h_ZZ_pTZZ = bookHisto1D(2, 1, 1); } /// Do the analysis void analyze(const Event& e) { const double weight = e.weight(); //////////////////////////////////////////////////////////////////// // Preselection of leptons for ZZ-> llll final state //////////////////////////////////////////////////////////////////// Particles leptons_sel4l; const vector& mu_sel4l = apply(e, "MUON_sel4l").dressedLeptons(); const vector& el_sel4l = apply(e, "ELECTRON_sel4l").dressedLeptons(); const vector leptonsFS_sel4l = mu_sel4l + el_sel4l; // leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() ); // leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() ); // mu: pT > 6 GeV, eta < 2.7; ele: pT > 7 GeV, eta < 2.5 for (const DressedLepton& l : leptonsFS_sel4l) { if (l.abspid() == PID::ELECTRON) leptons_sel4l.push_back(l); // REDUNDANT: if (l.pT() > 7*GeV && l.abseta() < 2.5) else if (l.abspid() == PID::MUON) leptons_sel4l.push_back(l); // REDUNDANT: if (l.pT() > 6*GeV && l.abseta() < 2.7) } ////////////////////////////////////////////////////////////////// // Exactly two opposite charged leptons ////////////////////////////////////////////////////////////////// // Calculate total 'flavour' charge double totalcharge = 0; for (const Particle& l 
: leptons_sel4l) totalcharge += l.pid(); // Analyze 4 lepton events if (leptons_sel4l.size() != 4 || totalcharge != 0) vetoEvent; // Identify Z states from 4 lepton pairs Zstate Z1, Z2, Z1_alt, Z2_alt; if ( !identifyZstates(Z1, Z2, Z1_alt, Z2_alt, leptons_sel4l) ) vetoEvent; const double mZ1 = Z1.mom().mass(); const double mZ2 = Z2.mom().mass(); const double mZ1_alt = Z1_alt.mom().mass(); const double mZ2_alt = Z2_alt.mom().mass(); const double pTZ1 = Z1.mom().pT(); const double pTZ2 = Z2.mom().pT(); const double mZZ = (Z1.mom() + Z2.mom()).mass(); const double pTZZ = (Z1.mom() + Z2.mom()).pT(); // Event selections // pT(Z) > 2 GeV bool pass = pTZ1 > 2*GeV && pTZ2 > 2*GeV; if (!pass) vetoEvent; // Lepton kinematics: pT > 20, 15, 10 (8 if muon) GeV int n1 = 0, n2 = 0, n3 = 0; for (Particle& l : leptons_sel4l) { if (l.pT() > 20*GeV) ++n1; if (l.pT() > 15*GeV) ++n2; if (l.pT() > 10*GeV && l.abspid() == PID::ELECTRON) ++n3; if (l.pT() > 8*GeV && l.abspid() == PID::MUON) ++n3; } pass = pass && n1>=1 && n2>=2 && n3>=3; if (!pass) vetoEvent; // Dilepton mass: 50 < mZ1 < 120 GeV, 12 < mZ2 < 120 GeV pass = pass && mZ1 > 50*GeV && mZ1 < 120*GeV; pass = pass && mZ2 > 12*GeV && mZ2 < 120*GeV; if (!pass) vetoEvent; // Lepton separation: deltaR(l, l') > 0.1 (0.2) for same- (different-) flavor leptons for (size_t i = 0; i < leptons_sel4l.size(); ++i) { for (size_t j = i + 1; j < leptons_sel4l.size(); ++j) { const Particle& l1 = leptons_sel4l[i]; const Particle& l2 = leptons_sel4l[j]; pass = pass && deltaR(l1, l2) > (l1.abspid() == l2.abspid() ? 
0.1 : 0.2); if (!pass) vetoEvent; } } // J/Psi veto: m(l+l-) > 5 GeV pass = pass && mZ1 > 5*GeV && mZ2 > 5*GeV && mZ1_alt > 5*GeV && mZ2_alt > 5*GeV; if (!pass) vetoEvent; // 80 < m4l < 1000 GeV pass = pass && mZZ > 80*GeV && mZZ < 1000*GeV; if (!pass) vetoEvent; // Fill histograms _h_ZZ_mZZ->fill(mZZ, weight); _h_ZZ_pTZZ->fill(pTZZ, weight); } /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn/TeV; scale(_h_ZZ_mZZ, norm); scale(_h_ZZ_pTZZ, norm); } /// Generic Z candidate struct Zstate : public ParticlePair { Zstate() { } Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { } FourMomentum mom() const { return first.momentum() + second.momentum(); } operator FourMomentum() const { return mom(); } static bool cmppT(const Zstate& lx, const Zstate& rx) { return lx.mom().pT() < rx.mom().pT(); } }; /// @brief 4l to ZZ assignment algorithm /// /// ZZ->4l pairing /// - At least two same flavour opposite sign (SFOS) lepton pairs /// - Ambiguities in pairing are resolved following the procedure /// 1. the leading Z (Z1) is choosen as the SFOS with dilepton mass closet to Z mass /// 2. 
the subleading Z (Z2) is choosen as the remaining SFOS dilepton pair /// /// Z1, Z2: the selected pairing /// Z1_alt, Z2_alt: the alternative pairing (the same as Z1, Z2 in 2e2m case) bool identifyZstates(Zstate& Z1, Zstate& Z2, Zstate& Z1_alt, Zstate& Z2_alt, const Particles& leptons_sel4l) { const double ZMASS = 91.1876*GeV; bool findZZ = false; Particles part_pos_el, part_neg_el, part_pos_mu, part_neg_mu; for (const Particle& l : leptons_sel4l) { if (l.abspid() == PID::ELECTRON) { if (l.pid() < 0) part_neg_el.push_back(l); if (l.pid() > 0) part_pos_el.push_back(l); } else if (l.abspid() == PID::MUON) { if (l.pid() < 0) part_neg_mu.push_back(l); if (l.pid() > 0) part_pos_mu.push_back(l); } } // eeee/mmmm channel if ((part_neg_el.size() == 2 && part_pos_el.size() == 2) || (part_neg_mu.size() == 2 && part_pos_mu.size() == 2)) { findZZ = true; Zstate Zcand_1, Zcand_2, Zcand_3, Zcand_4; Zstate Zcand_1_tmp, Zcand_2_tmp, Zcand_3_tmp, Zcand_4_tmp; if (part_neg_el.size() == 2) { // eeee Zcand_1_tmp = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) ); Zcand_2_tmp = Zstate( ParticlePair( part_neg_el[0], part_pos_el[1] ) ); Zcand_3_tmp = Zstate( ParticlePair( part_neg_el[1], part_pos_el[0] ) ); Zcand_4_tmp = Zstate( ParticlePair( part_neg_el[1], part_pos_el[1] ) ); } else { // mmmm Zcand_1_tmp = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) ); Zcand_2_tmp = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[1] ) ); Zcand_3_tmp = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[0] ) ); Zcand_4_tmp = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[1] ) ); } // We can have the following pairs: (Z1 + Z4) || (Z2 + Z3) // Firstly, reorder withing each quadruplet to have // - fabs(mZ1 - ZMASS) < fabs(mZ4 - ZMASS) // - fabs(mZ2 - ZMASS) < fabs(mZ3 - ZMASS) if (fabs(Zcand_1_tmp.mom().mass() - ZMASS) < fabs(Zcand_4_tmp.mom().mass() - ZMASS)) { Zcand_1 = Zcand_1_tmp; Zcand_4 = Zcand_4_tmp; } else { Zcand_1 = Zcand_4_tmp; Zcand_4 = Zcand_1_tmp; } if 
(fabs(Zcand_2_tmp.mom().mass() - ZMASS) < fabs(Zcand_3_tmp.mom().mass() - ZMASS)) { Zcand_2 = Zcand_2_tmp; Zcand_3 = Zcand_3_tmp; } else { Zcand_2 = Zcand_3_tmp; Zcand_3 = Zcand_2_tmp; } // We can have the following pairs: (Z1 + Z4) || (Z2 + Z3) // Secondly, select the leading and subleading Z following // 1. the leading Z (Z1) is choosen as the SFOS with dilepton mass closet to Z mass // 2. the subleading Z (Z2) is choosen as the remaining SFOS dilepton pair if (fabs(Zcand_1.mom().mass() - ZMASS) < fabs(Zcand_2.mom().mass() - ZMASS)) { Z1 = Zcand_1; Z2 = Zcand_4; Z1_alt = Zcand_2; Z2_alt = Zcand_3; } else { Z1 = Zcand_2; Z2 = Zcand_3; Z1_alt = Zcand_1; Z2_alt = Zcand_4; } } // end of eeee/mmmm channel else if (part_neg_el.size() == 1 && part_pos_el.size() == 1 && part_neg_mu.size() == 1 && part_pos_mu.size() == 1) { // 2e2m channel findZZ = true; Zstate Zcand_1, Zcand_2; Zcand_1 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) ); Zcand_2 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) ); if (fabs(Zcand_1.mom().mass() - ZMASS) < fabs(Zcand_2.mom().mass() - ZMASS)) { Z1 = Zcand_1; Z2 = Zcand_2; } else { Z1 = Zcand_2; Z2 = Zcand_1; } Z1_alt = Z1; Z2_alt = Z2; } return findZZ; } private: Histo1DPtr _h_ZZ_pTZZ, _h_ZZ_mZZ; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1394865); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1397637.cc b/analyses/pluginATLAS/ATLAS_2015_I1397637.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1397637.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1397637.cc @@ -1,218 +1,218 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" namespace 
Rivet { class ATLAS_2015_I1397637 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1397637); /// Book projections and histograms void init() { // Base final state definition const FinalState fs(Cuts::abseta < 4.5); // Neutrinos for MET IdentifiedFinalState nu_id; nu_id.acceptNeutrinos(); PromptFinalState neutrinos(nu_id); neutrinos.acceptTauDecays(true); declare(neutrinos, "neutrinos"); // Get photons used to dress leptons IdentifiedFinalState photons(fs); photons.acceptId(PID::PHOTON); // Use all bare muons as input to the DressedMuons projection IdentifiedFinalState mu_id(fs); mu_id.acceptIdPair(PID::MUON); PromptFinalState bare_mu(mu_id); bare_mu.acceptTauDecays(true); // Use all bare electrons as input to the DressedElectrons projection IdentifiedFinalState el_id(fs); el_id.acceptIdPair(PID::ELECTRON); PromptFinalState bare_el(el_id); bare_el.acceptTauDecays(true); // Use all bare leptons including taus for single-lepton filter IdentifiedFinalState lep_id(fs); lep_id.acceptIdPair(PID::MUON); lep_id.acceptIdPair(PID::ELECTRON); PromptFinalState bare_lep(lep_id); declare(bare_lep, "bare_lep"); // Tau finding /// @todo Use TauFinder - UnstableFinalState ufs; + UnstableParticles ufs; IdentifiedFinalState tau_id(ufs); tau_id.acceptIdPair(PID::TAU); PromptFinalState bare_tau(tau_id); declare(bare_tau, "bare_tau"); // Muons and electrons must have |eta| < 2.5 Cut eta_ranges = Cuts::abseta < 2.5; // Get dressed muons and the good muons (pt>25GeV) DressedLeptons all_dressed_mu(photons, bare_mu, 0.1, eta_ranges, true); DressedLeptons dressed_mu(photons, bare_mu, 0.1, eta_ranges && Cuts::pT > 25*GeV, true); declare(dressed_mu, "muons"); // Get dressed electrons and the good electrons (pt>25GeV) DressedLeptons all_dressed_el(photons, bare_el, 0.1, eta_ranges, true); DressedLeptons dressed_el(photons, bare_el, 0.1, eta_ranges && Cuts::pT > 25*GeV, true); declare(dressed_el, "electrons"); // Jet clustering VetoedFinalState vfs(fs); 
vfs.addVetoOnThisFinalState(all_dressed_el); vfs.addVetoOnThisFinalState(all_dressed_mu); vfs.addVetoOnThisFinalState(neutrinos); // Small-R jets /// @todo Use extra constructor args FastJets jets(vfs, FastJets::ANTIKT, 0.4); jets.useInvisibles(JetAlg::ALL_INVISIBLES); jets.useMuons(JetAlg::DECAY_MUONS); declare(jets, "jets"); // Large-R jets /// @todo Use extra constructor args FastJets large_jets(vfs, FastJets::ANTIKT, 1.0); large_jets.useInvisibles(JetAlg::ALL_INVISIBLES); large_jets.useMuons(JetAlg::DECAY_MUONS); declare(large_jets, "fat_jets"); /// Book histogram _h_pttop = bookHisto1D(1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Single lepton filter on bare leptons with no cuts const Particles& bare_lep = apply(event, "bare_lep").particles(); const Particles& bare_tau = apply(event, "bare_tau").particles(); if (bare_lep.size() + bare_tau.size() != 1) vetoEvent; // Electrons and muons const vector& electrons = apply(event, "electrons").dressedLeptons(); const vector& muons = apply(event, "muons").dressedLeptons(); if (electrons.size() + muons.size() != 1) vetoEvent; const DressedLepton& lepton = muons.empty() ? electrons[0] : muons[0]; // Get the neutrinos from the event record (they have pT > 0.0 and |eta| < 4.5 at this stage const Particles& neutrinos = apply(event, "neutrinos").particlesByPt(); FourMomentum met; for (const Particle& nu : neutrinos) met += nu.momentum(); if (met.pT() < 20*GeV) vetoEvent; // Thin jets and trimmed fat jets /// @todo Use Rivet built-in FJ trimming support const Jets& jets = apply(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5); const PseudoJets& fat_pjets = apply(event, "fat_jets").pseudoJetsByPt(); const double Rfilt = 0.3, ptFrac_min = 0.05; ///< @todo Need to be careful about the units for the pT cut passed to FJ? 
PseudoJets trimmed_fat_pjets; fastjet::Filter trimmer(fastjet::JetDefinition(fastjet::kt_algorithm, Rfilt), fastjet::SelectorPtFractionMin(ptFrac_min)); for (const PseudoJet& pjet : fat_pjets) trimmed_fat_pjets += trimmer(pjet); trimmed_fat_pjets = fastjet::sorted_by_pt(trimmed_fat_pjets); // Jet reclustering // Use a kT cluster sequence to recluster the trimmed jets so that a d12 can then be obtained from the reclustered jet vector splittingScales; for (const PseudoJet& tpjet : trimmed_fat_pjets) { const PseudoJets tpjet_constits = tpjet.constituents(); const fastjet::ClusterSequence kt_cs(tpjet_constits, fastjet::JetDefinition(fastjet::kt_algorithm, 1.5, fastjet::E_scheme, fastjet::Best)); const PseudoJets kt_jets = kt_cs.inclusive_jets(); const double d12 = 1.5 * sqrt(kt_jets[0].exclusive_subdmerge(1)); splittingScales += d12; } Jets trimmed_fat_jets; for (size_t i = 0; i < trimmed_fat_pjets.size(); ++i) { const Jet tj = trimmed_fat_pjets[i]; if (tj.mass() <= 100*GeV) continue; if (tj.pT() <= 300*GeV) continue; if (splittingScales[i] <= 40*GeV) continue; if (tj.abseta() >= 2.0) continue; trimmed_fat_jets += tj; } if (trimmed_fat_jets.empty()) vetoEvent; // Jet b-tagging Jets bjets, non_bjets; for (const Jet& jet : jets) (jet.bTagged() ? 
bjets : non_bjets) += jet; if (bjets.empty()) vetoEvent; // Boosted selection: lepton/jet overlap const double transmass = sqrt( 2 * lepton.pT() * met.pT() * (1 - cos(deltaPhi(lepton, met))) ); if (transmass + met.pt() <= 60*GeV) vetoEvent; int lepJetIndex = -1; for (size_t i = 0; i < jets.size(); ++i) { const Jet& jet = jets[i]; if (deltaR(jet, lepton) < 1.5) { lepJetIndex = i; break; } } if (lepJetIndex < 0) vetoEvent; const Jet& ljet = jets[lepJetIndex]; // Boosted selection: lepton-jet/fat-jet matching int fatJetIndex = -1; for (size_t j = 0; j < trimmed_fat_jets.size(); ++j) { const Jet& fjet = trimmed_fat_jets[j]; const double dR_fatjet = deltaR(ljet, fjet); const double dPhi_fatjet = deltaPhi(lepton, fjet); if (dR_fatjet > 1.5 && dPhi_fatjet > 2.3) { fatJetIndex = j; break; } } if (fatJetIndex < 0) vetoEvent; const Jet& fjet = trimmed_fat_jets[fatJetIndex]; // Boosted selection: b-tag matching const bool lepbtag = ljet.bTagged(); bool hadbtag = false; for (const Jet& bjet : bjets) { hadbtag |= (deltaR(fjet, bjet) < 1.0); } // Fill histo if selection passed if (hadbtag || lepbtag) _h_pttop->fill(fjet.pT()/GeV, event.weight()); } /// Normalise histograms etc., after the run void finalize() { scale(_h_pttop, crossSection()/femtobarn / sumOfWeights()); } private: Histo1DPtr _h_pttop; }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1397637); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1604029.cc b/analyses/pluginATLAS/ATLAS_2017_I1604029.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1604029.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1604029.cc @@ -1,149 +1,149 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PromptFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { ///@brief: ttbar + gamma at 8 TeV class ATLAS_2017_I1604029 : public 
Analysis { public: // Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1604029); // Book histograms and initialise projections before the run void init() { const FinalState fs; // signal photons PromptFinalState prompt_ph(Cuts::abspid == PID::PHOTON && Cuts::pT > 15*GeV && Cuts::abseta < 2.37); declare(prompt_ph, "photons"); // bare leptons Cut base_cuts = (Cuts::abseta < 2.7) && (Cuts::pT > 10*GeV); IdentifiedFinalState bare_leps(base_cuts); bare_leps.acceptIdPair(PID::MUON); bare_leps.acceptIdPair(PID::ELECTRON); declare(bare_leps, "bare_leptons"); // dressed leptons Cut dressed_cuts = (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV); PromptFinalState prompt_mu(base_cuts && Cuts::abspid == PID::MUON); PromptFinalState prompt_el(base_cuts && Cuts::abspid == PID::ELECTRON); IdentifiedFinalState all_photons(fs, PID::PHOTON); DressedLeptons elecs(all_photons, prompt_el, 0.1, dressed_cuts); declare(elecs, "elecs"); DressedLeptons muons(all_photons, prompt_mu, 0.1, dressed_cuts); declare(muons, "muons"); // auxiliary projections for 'single-lepton ttbar filter' PromptFinalState prompt_lep(Cuts::abspid == PID::MUON || Cuts::abspid == PID::ELECTRON); declare(prompt_lep, "prompt_leps"); - declare(UnstableFinalState(), "ufs"); + declare(UnstableParticles(), "ufs"); // jets FastJets jets(fs, FastJets::ANTIKT, 0.4, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES); declare(jets, "jets"); // BOOK HISTOGRAMS _h["pt"] = bookHisto1D(2,1,1); _h["eta"] = bookHisto1D(3,1,1); } // Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // analysis extrapolated to 1-lepton-plus-jets channel, where "lepton" cannot be a tau // (i.e. 
contribution from dileptonic ttbar where one of the leptons is outside // the detector acceptance has been subtracted as a background) if (applyProjection(event, "prompt_leps").particles().size() != 1) vetoEvent; - for (const auto& p : apply(event, "ufs").particles()) { + for (const auto& p : apply(event, "ufs").particles()) { if (p.fromPromptTau()) vetoEvent; } // photon selection Particles photons = applyProjection(event, "photons").particlesByPt(); Particles bare_leps = apply(event, "bare_leptons").particles(); for (const Particle& lep : bare_leps) ifilter_discard(photons, deltaRLess(lep, 0.1)); if (photons.size() != 1) vetoEvent; const Particle& photon = photons[0]; // jet selection Jets jets = apply(event, "jets").jetsByPt(Cuts::abseta < 2.5 && Cuts::pT > 25*GeV); // lepton selection const vector& elecs = apply(event, "elecs").dressedLeptons(); const vector& all_muons = apply(event, "muons").dressedLeptons(); // jet photon/electron overlap removal for (const DressedLepton& e : elecs) ifilter_discard(jets, deltaRLess(e, 0.2, RAPIDITY)); for (const Particle& ph : photons) ifilter_discard(jets, deltaRLess(ph, 0.1, RAPIDITY)); if (jets.size() < 4) vetoEvent; // photon-jet minimum deltaR double mindR_phjet = 999.; for (Jet jet : jets) { const double dR_phjet = deltaR(photon, jet); if (dR_phjet < mindR_phjet) mindR_phjet = dR_phjet; } if (mindR_phjet < 0.5) vetoEvent; // muon jet overlap removal vector muons; foreach (DressedLepton mu, all_muons) { bool overlaps = false; foreach (Jet jet, jets) { if (deltaR(mu, jet) < 0.4) { overlaps = true; break; } } if (overlaps) continue; muons.push_back(mu); } // one electron XOR one muon bool isEl = elecs.size() == 1 && muons.size() == 0; bool isMu = muons.size() == 1 && elecs.size() == 0; if (!isEl && !isMu) vetoEvent; // photon-lepton deltaR double mindR_phlep = deltaR(photon, isEl? 
elecs[0] : muons[0]); if (mindR_phlep < 0.7) vetoEvent; // b-tagging Jets bjets; foreach (Jet jet, jets) { if (jet.bTagged(Cuts::pT > 5*GeV)) bjets +=jet; } if (bjets.empty()) vetoEvent; _h["pt"]->fill(photon.pT()/GeV, weight); _h["eta"]->fill(photon.abseta(), weight); } // Normalise histograms etc., after the run void finalize() { const double normto(crossSection() / femtobarn / sumOfWeights()); for (auto &hist : _h) { scale(hist.second, normto); } } private: map _h; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1604029); } diff --git a/analyses/pluginCMS/CMS_2011_S8973270.cc b/analyses/pluginCMS/CMS_2011_S8973270.cc --- a/analyses/pluginCMS/CMS_2011_S8973270.cc +++ b/analyses/pluginCMS/CMS_2011_S8973270.cc @@ -1,164 +1,164 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class CMS_2011_S8973270 : public Analysis { public: /// Constructor CMS_2011_S8973270() : Analysis("CMS_2011_S8973270") { } void init() { FinalState fs; FastJets jetproj(fs, FastJets::ANTIKT, 0.5); jetproj.useInvisibles(); declare(jetproj, "Jets"); - UnstableFinalState ufs; + UnstableParticles ufs; declare(ufs, "UFS"); // Book histograms _h_dsigma_dR_56GeV = bookHisto1D(1,1,1); _h_dsigma_dR_84GeV = bookHisto1D(2,1,1); _h_dsigma_dR_120GeV = bookHisto1D(3,1,1); _h_dsigma_dPhi_56GeV = bookHisto1D(4,1,1); _h_dsigma_dPhi_84GeV = bookHisto1D(5,1,1); _h_dsigma_dPhi_120GeV = bookHisto1D(6,1,1); _countMCDR56 = 0; _countMCDR84 = 0; _countMCDR120 = 0; _countMCDPhi56 = 0; _countMCDPhi84 = 0; _countMCDPhi120 = 0; } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); const Jets& jets = apply(event,"Jets").jetsByPt(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); // Find 
the leading jet pT and eta if (jets.size() == 0) vetoEvent; const double ljpT = jets[0].pT(); const double ljeta = jets[0].eta(); MSG_DEBUG("Leading jet pT / eta: " << ljpT << " / " << ljeta); // Minimum requirement for event if (ljpT > 56*GeV && fabs(ljeta) < 3.0) { // Find B hadrons in event int nab = 0, nb = 0; //counters for all B and independent B hadrons double etaB1 = 7.7, etaB2 = 7.7; double phiB1 = 7.7, phiB2 = 7.7; double pTB1 = 7.7, pTB2 = 7.7; foreach (const Particle& p, ufs.particles()) { int aid = p.abspid(); if (aid/100 == 5 || aid/1000==5) { nab++; // 2J+1 == 1 (mesons) or 2 (baryons) if (aid%10 == 1 || aid%10 == 2) { // No B decaying to B if (aid != 5222 && aid != 5112 && aid != 5212 && aid != 5322) { if (nb==0) { etaB1 = p.eta(); phiB1 = p.phi(); pTB1 = p.pT(); } else if (nb==1) { etaB2 = p.eta(); phiB2 = p.phi(); pTB2 = p.pT(); } nb++; } } MSG_DEBUG("ID " << aid << " B hadron"); } } if (nb==2 && pTB1 > 15*GeV && pTB2 > 15*GeV && fabs(etaB1) < 2.0 && fabs(etaB2) < 2.0) { double dPhi = deltaPhi(phiB1, phiB2); double dR = deltaR(etaB1, phiB1, etaB2, phiB2); MSG_DEBUG("DR/DPhi " << dR << " " << dPhi); // MC counters if (dR > 2.4) _countMCDR56 += weight; if (dR > 2.4 && ljpT > 84*GeV) _countMCDR84 += weight; if (dR > 2.4 && ljpT > 120*GeV) _countMCDR120 += weight; if (dPhi > 3.*PI/4.) _countMCDPhi56 += weight; if (dPhi > 3.*PI/4. && ljpT > 84*GeV) _countMCDPhi84 += weight; if (dPhi > 3.*PI/4. 
&& ljpT > 120*GeV) _countMCDPhi120 += weight; _h_dsigma_dR_56GeV->fill(dR, weight); if (ljpT > 84*GeV) _h_dsigma_dR_84GeV->fill(dR, weight); if (ljpT > 120*GeV) _h_dsigma_dR_120GeV->fill(dR, weight); _h_dsigma_dPhi_56GeV->fill(dPhi, weight); if (ljpT > 84*GeV) _h_dsigma_dPhi_84GeV->fill(dPhi, weight); if (ljpT > 120*GeV) _h_dsigma_dPhi_120GeV->fill(dPhi, weight); //MSG_DEBUG("nb " << nb << " " << nab); } } } /// Normalise histograms etc., after the run void finalize() { MSG_DEBUG("crossSection " << crossSection() << " sumOfWeights " << sumOfWeights()); // Hardcoded bin widths double DRbin = 0.4; double DPhibin = PI/8.0; // Find out the correct numbers double nDataDR56 = 25862.20; double nDataDR84 = 5675.55; double nDataDR120 = 1042.72; double nDataDPhi56 = 24220.00; double nDataDPhi84 = 4964.00; double nDataDPhi120 = 919.10; double normDR56 = (_countMCDR56 > 0.) ? nDataDR56/_countMCDR56 : crossSection()/sumOfWeights(); double normDR84 = (_countMCDR84 > 0.) ? nDataDR84/_countMCDR84 : crossSection()/sumOfWeights(); double normDR120 = (_countMCDR120 > 0.) ? nDataDR120/_countMCDR120 : crossSection()/sumOfWeights(); double normDPhi56 = (_countMCDPhi56 > 0.) ? nDataDPhi56/_countMCDPhi56 : crossSection()/sumOfWeights(); double normDPhi84 = (_countMCDPhi84 > 0.) ? nDataDPhi84/_countMCDPhi84 : crossSection()/sumOfWeights(); double normDPhi120 = (_countMCDPhi120 > 0.) ? 
nDataDPhi120/_countMCDPhi120 : crossSection()/sumOfWeights(); scale(_h_dsigma_dR_56GeV, normDR56*DRbin); scale(_h_dsigma_dR_84GeV, normDR84*DRbin); scale(_h_dsigma_dR_120GeV, normDR120*DRbin); scale(_h_dsigma_dPhi_56GeV, normDPhi56*DPhibin); scale(_h_dsigma_dPhi_84GeV, normDPhi84*DPhibin); scale(_h_dsigma_dPhi_120GeV, normDPhi120*DPhibin); } //@} private: /// @name Counters //@{ double _countMCDR56, _countMCDR84, _countMCDR120; double _countMCDPhi56, _countMCDPhi84, _countMCDPhi120; //@} /// @name Histograms //@{ Histo1DPtr _h_dsigma_dR_56GeV, _h_dsigma_dR_84GeV, _h_dsigma_dR_120GeV; Histo1DPtr _h_dsigma_dPhi_56GeV, _h_dsigma_dPhi_84GeV, _h_dsigma_dPhi_120GeV; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S8973270); } diff --git a/analyses/pluginCMS/CMS_2011_S8978280.cc b/analyses/pluginCMS/CMS_2011_S8978280.cc --- a/analyses/pluginCMS/CMS_2011_S8978280.cc +++ b/analyses/pluginCMS/CMS_2011_S8978280.cc @@ -1,114 +1,114 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief CMS strange particle spectra (Ks, Lambda, Cascade) in pp at 900 and 7000 GeV /// @author Kevin Stenson class CMS_2011_S8978280 : public Analysis { public: /// Constructor CMS_2011_S8978280() : Analysis("CMS_2011_S8978280") { } void init() { - UnstableFinalState ufs(Cuts::absrap < 2); + UnstableParticles ufs(Cuts::absrap < 2); declare(ufs, "UFS"); // Particle distributions versus rapidity and transverse momentum if (fuzzyEquals(sqrtS()/GeV, 900*GeV)){ _h_dNKshort_dy = bookHisto1D(1, 1, 1); _h_dNKshort_dpT = bookHisto1D(2, 1, 1); _h_dNLambda_dy = bookHisto1D(3, 1, 1); _h_dNLambda_dpT = bookHisto1D(4, 1, 1); _h_dNXi_dy = bookHisto1D(5, 1, 1); _h_dNXi_dpT = bookHisto1D(6, 1, 1); // _h_LampT_KpT = bookScatter2D(7, 1, 1); _h_XipT_LampT = bookScatter2D(8, 1, 1); _h_Lamy_Ky = bookScatter2D(9, 1, 1); _h_Xiy_Lamy = bookScatter2D(10, 1, 1); } else if 
(fuzzyEquals(sqrtS()/GeV, 7000*GeV)){ _h_dNKshort_dy = bookHisto1D(1, 1, 2); _h_dNKshort_dpT = bookHisto1D(2, 1, 2); _h_dNLambda_dy = bookHisto1D(3, 1, 2); _h_dNLambda_dpT = bookHisto1D(4, 1, 2); _h_dNXi_dy = bookHisto1D(5, 1, 2); _h_dNXi_dpT = bookHisto1D(6, 1, 2); // _h_LampT_KpT = bookScatter2D(7, 1, 2); _h_XipT_LampT = bookScatter2D(8, 1, 2); _h_Lamy_Ky = bookScatter2D(9, 1, 2); _h_Xiy_Lamy = bookScatter2D(10, 1, 2); } } void analyze(const Event& event) { const double weight = event.weight(); - const UnstableFinalState& parts = apply(event, "UFS"); + const UnstableParticles& parts = apply(event, "UFS"); foreach (const Particle& p, parts.particles()) { switch (p.abspid()) { case PID::K0S: _h_dNKshort_dy->fill(p.absrap(), weight); _h_dNKshort_dpT->fill(p.pT(), weight); break; case PID::LAMBDA: // Lambda should not have Cascade or Omega ancestors since they should not decay. But just in case... if ( !( p.hasAncestor(3322) || p.hasAncestor(-3322) || p.hasAncestor(3312) || p.hasAncestor(-3312) || p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) { _h_dNLambda_dy->fill(p.absrap(), weight); _h_dNLambda_dpT->fill(p.pT(), weight); } break; case PID::XIMINUS: // Cascade should not have Omega ancestors since it should not decay. But just in case... 
if ( !( p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) { _h_dNXi_dy->fill(p.absrap(), weight); _h_dNXi_dpT->fill(p.pT(), weight); } break; } } } void finalize() { divide(_h_dNLambda_dpT,_h_dNKshort_dpT, _h_LampT_KpT); divide(_h_dNXi_dpT,_h_dNLambda_dpT, _h_XipT_LampT); divide(_h_dNLambda_dy,_h_dNKshort_dy, _h_Lamy_Ky); divide(_h_dNXi_dy,_h_dNLambda_dy, _h_Xiy_Lamy); const double normpT = 1.0/sumOfWeights(); const double normy = 0.5*normpT; // Accounts for using |y| instead of y scale(_h_dNKshort_dy, normy); scale(_h_dNKshort_dpT, normpT); scale(_h_dNLambda_dy, normy); scale(_h_dNLambda_dpT, normpT); scale(_h_dNXi_dy, normy); scale(_h_dNXi_dpT, normpT); } private: // Particle distributions versus rapidity and transverse momentum Histo1DPtr _h_dNKshort_dy, _h_dNKshort_dpT, _h_dNLambda_dy, _h_dNLambda_dpT, _h_dNXi_dy, _h_dNXi_dpT; Scatter2DPtr _h_LampT_KpT, _h_XipT_LampT, _h_Lamy_Ky, _h_Xiy_Lamy; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2011_S8978280); } diff --git a/analyses/pluginCMS/CMS_2012_PAS_QCD_11_010.cc b/analyses/pluginCMS/CMS_2012_PAS_QCD_11_010.cc --- a/analyses/pluginCMS/CMS_2012_PAS_QCD_11_010.cc +++ b/analyses/pluginCMS/CMS_2012_PAS_QCD_11_010.cc @@ -1,89 +1,89 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class CMS_2012_PAS_QCD_11_010 : public Analysis { public: CMS_2012_PAS_QCD_11_010() : Analysis("CMS_2012_PAS_QCD_11_010") { } void init() { const FastJets jets(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 0.5*GeV), FastJets::ANTIKT, 0.5); declare(jets, "Jets"); - const UnstableFinalState ufs(Cuts::abseta < 2 && Cuts::pT > 0.6*GeV); + const UnstableParticles ufs(Cuts::abseta < 2 && Cuts::pT > 0.6*GeV); declare(ufs, "UFS"); _h_nTrans_Lambda = bookProfile1D(1, 1, 1); _h_nTrans_Kaon = bookProfile1D(2, 1, 1); 
_h_ptsumTrans_Lambda = bookProfile1D(3, 1, 1); _h_ptsumTrans_Kaon = bookProfile1D(4, 1, 1); } void analyze(const Event& event) { const double weight = event.weight(); Jets jets = apply(event, "Jets").jetsByPt(1.0*GeV); if (jets.size() < 1) vetoEvent; if (fabs(jets[0].eta()) >= 2) { // cuts on leading jets vetoEvent; } FourMomentum p_lead = jets[0].momentum(); const double pTlead = p_lead.pT(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); int numTrans_Kaon = 0; int numTrans_Lambda = 0; double ptSumTrans_Kaon = 0.; double ptSumTrans_Lambda = 0.; foreach (const Particle& p, ufs.particles()) { double dphi = deltaPhi(p, p_lead); double pT = p.pT(); const PdgId id = p.abspid(); if (dphi > PI/3. && dphi < 2./3.*PI) { if (id == 310 && pT > 0.6*GeV) { ptSumTrans_Kaon += pT/GeV; numTrans_Kaon++; } else if (id == 3122 && pT > 1.5*GeV) { ptSumTrans_Lambda += pT/GeV; numTrans_Lambda++; } } } _h_nTrans_Kaon->fill(pTlead/GeV, numTrans_Kaon / (8.0 * PI/3.0), weight); _h_nTrans_Lambda->fill(pTlead/GeV, numTrans_Lambda / (8.0 * PI/3.0), weight); _h_ptsumTrans_Kaon->fill(pTlead/GeV, ptSumTrans_Kaon / (GeV * (8.0 * PI/3.0)), weight); _h_ptsumTrans_Lambda->fill(pTlead/GeV, ptSumTrans_Lambda / (GeV * (8.0 * PI/3.0)), weight); } void finalize() { } private: Profile1DPtr _h_nTrans_Kaon; Profile1DPtr _h_nTrans_Lambda; Profile1DPtr _h_ptsumTrans_Kaon; Profile1DPtr _h_ptsumTrans_Lambda; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_PAS_QCD_11_010); } diff --git a/analyses/pluginCMS/CMS_2013_I1256943.cc b/analyses/pluginCMS/CMS_2013_I1256943.cc --- a/analyses/pluginCMS/CMS_2013_I1256943.cc +++ b/analyses/pluginCMS/CMS_2013_I1256943.cc @@ -1,194 +1,194 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// CMS 
cross-section and angular correlations in Z boson + b-hadrons events at 7 TeV class CMS_2013_I1256943 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1256943); /// Add projections and book histograms void init() { _sumW = 0; _sumW50 = 0; _sumWpT = 0; FinalState fs(Cuts::abseta < 2.4 && Cuts::pT > 20*GeV); declare(fs, "FS"); - UnstableFinalState ufs(Cuts::abseta < 2 && Cuts::pT > 15*GeV); + UnstableParticles ufs(Cuts::abseta < 2 && Cuts::pT > 15*GeV); declare(ufs, "UFS"); Cut zetacut = Cuts::abseta < 2.4; ZFinder zfindermu(fs, zetacut, PID::MUON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV); declare(zfindermu, "ZFinderMu"); ZFinder zfinderel(fs, zetacut, PID::ELECTRON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV); declare(zfinderel, "ZFinderEl"); // Histograms in non-boosted region of Z pT _h_dR_BB = bookHisto1D(1, 1, 1); _h_dphi_BB = bookHisto1D(2, 1, 1); _h_min_dR_ZB = bookHisto1D(3, 1, 1); _h_A_ZBB = bookHisto1D(4, 1, 1); // Histograms in boosted region of Z pT (pT > 50 GeV) _h_dR_BB_boost = bookHisto1D(5, 1, 1); _h_dphi_BB_boost = bookHisto1D(6, 1, 1); _h_min_dR_ZB_boost = bookHisto1D(7, 1, 1); _h_A_ZBB_boost = bookHisto1D(8, 1, 1); _h_min_ZpT = bookHisto1D(9,1,1); } /// Do the analysis void analyze(const Event& e) { - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); const ZFinder& zfindermu = apply(e, "ZFinderMu"); const ZFinder& zfinderel = apply(e, "ZFinderEl"); // Look for a Z --> mu+ mu- event in the final state if (zfindermu.empty() && zfinderel.empty()) vetoEvent; const Particles& z = !zfindermu.empty() ? 
zfindermu.bosons() : zfinderel.bosons(); const bool is_boosted = ( z[0].pT() > 50*GeV ); vector Bmom; // Loop over the unstable particles for (const Particle& p : ufs.particles()) { const PdgId pid = p.pid(); // Look for particles with a bottom quark if (PID::hasBottom(pid)) { bool good_B = false; const GenParticle* pgen = p.genParticle(); const GenVertex* vgen = pgen -> end_vertex(); // Loop over the decay products of each unstable particle, looking for a b-hadron pair /// @todo Avoid HepMC API if (vgen) { for (GenVertex::particles_out_const_iterator it = vgen->particles_out_const_begin(); it != vgen->particles_out_const_end(); ++it) { // If the particle produced has a bottom quark do not count it and go to the next loop cycle. if (!( PID::hasBottom( (*it)->pdg_id() ) ) ) { good_B = true; continue; } else { good_B = false; break; } } if (good_B ) Bmom.push_back( p.momentum() ); } else continue; } } // If there are more than two B's in the final state veto the event if (Bmom.size() != 2 ) { Bmom.clear(); vetoEvent; } // Calculate the observables double dphiBB = deltaPhi(Bmom[0], Bmom[1]); double dRBB = deltaR(Bmom[0], Bmom[1]); const FourMomentum& pZ = z[0].momentum(); const bool closest_B = ( deltaR(pZ, Bmom[0]) < deltaR(pZ, Bmom[1]) ); const double mindR_ZB = closest_B ? deltaR(pZ, Bmom[0]) : deltaR(pZ, Bmom[1]); const double maxdR_ZB = closest_B ? 
deltaR(pZ, Bmom[1]) : deltaR(pZ, Bmom[0]); const double AZBB = ( maxdR_ZB - mindR_ZB ) / ( maxdR_ZB + mindR_ZB ); // Get event weight for histogramming const double weight = e.weight(); // Fill the histograms in the non-boosted region _h_dphi_BB->fill(dphiBB, weight); _h_dR_BB->fill(dRBB, weight); _h_min_dR_ZB->fill(mindR_ZB, weight); _h_A_ZBB->fill(AZBB, weight); _sumW += weight; _sumWpT += weight; // Fill the histograms in the boosted region if (is_boosted) { _sumW50 += weight; _h_dphi_BB_boost->fill(dphiBB, weight); _h_dR_BB_boost->fill(dRBB, weight); _h_min_dR_ZB_boost->fill(mindR_ZB, weight); _h_A_ZBB_boost->fill(AZBB, weight); } // Fill Z pT (cumulative) histogram _h_min_ZpT->fill(0, weight); if (pZ.pT() > 40*GeV ) { _sumWpT += weight; _h_min_ZpT->fill(40, weight); } if (pZ.pT() > 80*GeV ) { _sumWpT += weight; _h_min_ZpT->fill(80, weight); } if (pZ.pT() > 120*GeV ) { _sumWpT += weight; _h_min_ZpT->fill(120, weight); } Bmom.clear(); } /// Finalize void finalize() { // Normalize excluding overflow bins (d'oh) normalize(_h_dR_BB, 0.7*crossSection()*_sumW/sumOfWeights(), false); // d01-x01-y01 normalize(_h_dphi_BB, 0.53*crossSection()*_sumW/sumOfWeights(), false); // d02-x01-y01 normalize(_h_min_dR_ZB, 0.84*crossSection()*_sumW/sumOfWeights(), false); // d03-x01-y01 normalize(_h_A_ZBB, 0.2*crossSection()*_sumW/sumOfWeights(), false); // d04-x01-y01 normalize(_h_dR_BB_boost, 0.84*crossSection()*_sumW50/sumOfWeights(), false); // d05-x01-y01 normalize(_h_dphi_BB_boost, 0.63*crossSection()*_sumW50/sumOfWeights(), false); // d06-x01-y01 normalize(_h_min_dR_ZB_boost, 1*crossSection()*_sumW50/sumOfWeights(), false); // d07-x01-y01 normalize(_h_A_ZBB_boost, 0.25*crossSection()*_sumW50/sumOfWeights(), false); // d08-x01-y01 normalize(_h_min_ZpT, 40*crossSection()*_sumWpT/sumOfWeights(), false); // d09-x01-y01 } private: /// @name Weight counters //@{ double _sumW, _sumW50, _sumWpT; //@} /// @name Histograms //@{ Histo1DPtr _h_dphi_BB, _h_dR_BB, _h_min_dR_ZB, _h_A_ZBB; 
Histo1DPtr _h_dphi_BB_boost, _h_dR_BB_boost, _h_min_dR_ZB_boost, _h_A_ZBB_boost, _h_min_ZpT; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2013_I1256943); } diff --git a/analyses/pluginCMS/CMS_2016_I1486238.cc b/analyses/pluginCMS/CMS_2016_I1486238.cc --- a/analyses/pluginCMS/CMS_2016_I1486238.cc +++ b/analyses/pluginCMS/CMS_2016_I1486238.cc @@ -1,124 +1,124 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/InitialQuarks.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// Studies of 2 b-jet + 2 jet production in proton-proton collisions at 7 TeV class CMS_2016_I1486238 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1486238); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FastJets akt(FinalState(), FastJets::ANTIKT, 0.5); addProjection(akt, "antikT"); _h_Deltaphi_newway = bookHisto1D(1,1,1); _h_deltaphiafterlight = bookHisto1D(9,1,1); _h_SumPLight = bookHisto1D(5,1,1); _h_LeadingBJetpt = bookHisto1D(11,1,1); _h_SubleadingBJetpt = bookHisto1D(15,1,1); _h_LeadingLightJetpt = bookHisto1D(13,1,1); _h_SubleadingLightJetpt = bookHisto1D(17,1,1); _h_LeadingBJeteta = bookHisto1D(10,1,1); _h_SubleadingBJeteta = bookHisto1D(14,1,1); _h_LeadingLightJeteta = bookHisto1D(12,1,1); _h_SubleadingLightJeteta = bookHisto1D(16,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = apply(event, "antikT").jetsByPt(Cuts::absrap < 4.7 && Cuts::pT > 20*GeV); if (jets.size() < 4) vetoEvent; // Initial quarks /// @note Quark-level tagging... 
Particles bquarks; for (const GenParticle* p : particles(event.genEvent())) { if (abs(p->pdg_id()) == PID::BQUARK) bquarks += Particle(p); } Jets bjets, ljets; for (const Jet& j : jets) { const bool btag = any(bquarks, deltaRLess(j, 0.3)); // for (const Particle& b : bquarks) if (deltaR(j, b) < 0.3) btag = true; (btag && j.abseta() < 2.4 ? bjets : ljets).push_back(j); } // Fill histograms const double weight = event.weight(); if (bjets.size() >= 2 && ljets.size() >= 2) { _h_LeadingBJetpt->fill(bjets[0].pT()/GeV, weight); _h_SubleadingBJetpt->fill(bjets[1].pT()/GeV, weight); _h_LeadingLightJetpt->fill(ljets[0].pT()/GeV, weight); _h_SubleadingLightJetpt->fill(ljets[1].pT()/GeV, weight); // _h_LeadingBJeteta->fill(bjets[0].eta(), weight); _h_SubleadingBJeteta->fill(bjets[1].eta(), weight); _h_LeadingLightJeteta->fill(ljets[0].eta(), weight); _h_SubleadingLightJeteta->fill(ljets[1].eta(), weight); const double lightdphi = deltaPhi(ljets[0], ljets[1]); _h_deltaphiafterlight->fill(lightdphi, weight); const double vecsumlightjets = sqrt(sqr(ljets[0].px()+ljets[1].px()) + sqr(ljets[0].py()+ljets[1].py())); //< @todo Just (lj0+lj1).pT()? Or use add_quad const double term2 = vecsumlightjets/(sqrt(sqr(ljets[0].px()) + sqr(ljets[0].py())) + sqrt(sqr(ljets[1].px()) + sqr(ljets[1].py()))); //< @todo lj0.pT() + lj1.pT()? 
Or add_quad _h_SumPLight->fill(term2, weight); const double pxBsyst2 = bjets[0].px()+bjets[1].px(); // @todo (bj0+bj1).px() const double pyBsyst2 = bjets[0].py()+bjets[1].py(); // @todo (bj0+bj1).py() const double pxJetssyst2 = ljets[0].px()+ljets[1].px(); // @todo (lj0+lj1).px() const double pyJetssyst2 = ljets[0].py()+ljets[1].py(); // @todo (lj0+lj1).py() const double modulusB2 = sqrt(sqr(pxBsyst2)+sqr(pyBsyst2)); //< @todo add_quad const double modulusJets2 = sqrt(sqr(pxJetssyst2)+sqr(pyJetssyst2)); //< @todo add_quad const double cosphiBsyst2 = pxBsyst2/modulusB2; const double cosphiJetssyst2 = pxJetssyst2/modulusJets2; const double phiBsyst2 = ((pyBsyst2 > 0) ? 1 : -1) * acos(cosphiBsyst2); //< @todo sign(pyBsyst2) const double phiJetssyst2 = sign(pyJetssyst2) * acos(cosphiJetssyst2); const double Dphi2 = deltaPhi(phiBsyst2, phiJetssyst2); _h_Deltaphi_newway->fill(Dphi2,weight); } } /// Normalise histograms etc., after the run void finalize() { const double invlumi = crossSection()/picobarn/sumOfWeights(); normalize({_h_SumPLight, _h_deltaphiafterlight, _h_Deltaphi_newway}); scale({_h_LeadingLightJetpt, _h_SubleadingLightJetpt, _h_LeadingBJetpt, _h_SubleadingBJetpt}, invlumi); scale({_h_LeadingLightJeteta, _h_SubleadingLightJeteta, _h_LeadingBJeteta, _h_SubleadingBJeteta}, invlumi); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_deltaphiafterlight, _h_Deltaphi_newway, _h_SumPLight; Histo1DPtr _h_LeadingBJetpt, _h_SubleadingBJetpt, _h_LeadingLightJetpt, _h_SubleadingLightJetpt; Histo1DPtr _h_LeadingBJeteta, _h_SubleadingBJeteta, _h_LeadingLightJeteta, _h_SubleadingLightJeteta; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1486238); } diff --git a/analyses/pluginLEP/ALEPH_1996_S3486095.cc b/analyses/pluginLEP/ALEPH_1996_S3486095.cc --- a/analyses/pluginLEP/ALEPH_1996_S3486095.cc +++ b/analyses/pluginLEP/ALEPH_1996_S3486095.cc @@ -1,557 +1,557 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include 
"Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief ALEPH QCD study with event shapes and identified particles /// @author Holger Schulz class ALEPH_1996_S3486095 : public Analysis { public: /// Constructor ALEPH_1996_S3486095() : Analysis("ALEPH_1996_S3486095") { _numChParticles = 0; _weightedTotalPartNum = 0; _weightedTotalNumPiPlus = 0; _weightedTotalNumKPlus = 0; _weightedTotalNumP = 0; _weightedTotalNumPhoton = 0; _weightedTotalNumPi0 = 0; _weightedTotalNumEta = 0; _weightedTotalNumEtaPrime = 0; _weightedTotalNumK0 = 0; _weightedTotalNumLambda0 = 0; _weightedTotalNumXiMinus = 0; _weightedTotalNumSigma1385Plus= 0; _weightedTotalNumXi1530_0 = 0; _weightedTotalNumRho = 0; _weightedTotalNumOmega782 = 0; _weightedTotalNumKStar892_0 = 0; _weightedTotalNumPhi = 0; _weightedTotalNumKStar892Plus = 0; } /// @name Analysis methods //@{ void init() { // Set up projections declare(Beam(), "Beams"); const ChargedFinalState cfs; declare(cfs, "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(cfs), "Sphericity"); declare(ParisiTensor(cfs), "Parisi"); const Thrust thrust(cfs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); // Book histograms _histSphericity = bookHisto1D(1, 1, 1); _histAplanarity = bookHisto1D(2, 1, 1); _hist1MinusT = bookHisto1D(3, 1, 1); _histTMinor = bookHisto1D(4, 1, 1); _histY3 = bookHisto1D(5, 1, 1); _histHeavyJetMass = bookHisto1D(6, 1, 1); _histCParam = bookHisto1D(7, 1, 1); _histOblateness = bookHisto1D(8, 1, 1); _histScaledMom = bookHisto1D(9, 
1, 1); _histRapidityT = bookHisto1D(10, 1, 1); _histPtSIn = bookHisto1D(11, 1, 1); _histPtSOut = bookHisto1D(12, 1, 1); _histLogScaledMom = bookHisto1D(17, 1, 1); _histChMult = bookHisto1D(18, 1, 1); _histMeanChMult = bookHisto1D(19, 1, 1); _histMeanChMultRapt05= bookHisto1D(20, 1, 1); _histMeanChMultRapt10= bookHisto1D(21, 1, 1); _histMeanChMultRapt15= bookHisto1D(22, 1, 1); _histMeanChMultRapt20= bookHisto1D(23, 1, 1); // Particle spectra _histMultiPiPlus = bookHisto1D(25, 1, 1); _histMultiKPlus = bookHisto1D(26, 1, 1); _histMultiP = bookHisto1D(27, 1, 1); _histMultiPhoton = bookHisto1D(28, 1, 1); _histMultiPi0 = bookHisto1D(29, 1, 1); _histMultiEta = bookHisto1D(30, 1, 1); _histMultiEtaPrime = bookHisto1D(31, 1, 1); _histMultiK0 = bookHisto1D(32, 1, 1); _histMultiLambda0 = bookHisto1D(33, 1, 1); _histMultiXiMinus = bookHisto1D(34, 1, 1); _histMultiSigma1385Plus = bookHisto1D(35, 1, 1); _histMultiXi1530_0 = bookHisto1D(36, 1, 1); _histMultiRho = bookHisto1D(37, 1, 1); _histMultiOmega782 = bookHisto1D(38, 1, 1); _histMultiKStar892_0 = bookHisto1D(39, 1, 1); _histMultiPhi = bookHisto1D(40, 1, 1); _histMultiKStar892Plus = bookHisto1D(43, 1, 1); // Mean multiplicities _histMeanMultiPi0 = bookHisto1D(44, 1, 2); _histMeanMultiEta = bookHisto1D(44, 1, 3); _histMeanMultiEtaPrime = bookHisto1D(44, 1, 4); _histMeanMultiK0 = bookHisto1D(44, 1, 5); _histMeanMultiRho = bookHisto1D(44, 1, 6); _histMeanMultiOmega782 = bookHisto1D(44, 1, 7); _histMeanMultiPhi = bookHisto1D(44, 1, 8); _histMeanMultiKStar892Plus = bookHisto1D(44, 1, 9); _histMeanMultiKStar892_0 = bookHisto1D(44, 1, 10); _histMeanMultiLambda0 = bookHisto1D(44, 1, 11); _histMeanMultiSigma0 = bookHisto1D(44, 1, 12); _histMeanMultiXiMinus = bookHisto1D(44, 1, 13); _histMeanMultiSigma1385Plus = bookHisto1D(44, 1, 14); _histMeanMultiXi1530_0 = bookHisto1D(44, 1, 15); _histMeanMultiOmegaOmegaBar = bookHisto1D(44, 1, 16); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS 
particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); _weightedTotalPartNum += numParticles * weight; // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Thrusts MSG_DEBUG("Calculating thrust"); const Thrust& thrust = apply(e, "Thrust"); _hist1MinusT->fill(1 - thrust.thrust(), weight); _histTMinor->fill(thrust.thrustMinor(), weight); _histOblateness->fill(thrust.oblateness(), weight); // Jets MSG_DEBUG("Calculating differential jet rate plots:"); const FastJets& durjet = apply(e, "DurhamJets"); if (durjet.clusterSeq()) { double y3 = durjet.clusterSeq()->exclusive_ymerge_max(2); if (y3>0.0) _histY3->fill(-1. * std::log(y3), weight); } // Sphericities MSG_DEBUG("Calculating sphericity"); const Sphericity& sphericity = apply(e, "Sphericity"); _histSphericity->fill(sphericity.sphericity(), weight); _histAplanarity->fill(sphericity.aplanarity(), weight); // C param MSG_DEBUG("Calculating Parisi params"); const ParisiTensor& parisi = apply(e, "Parisi"); _histCParam->fill(parisi.C(), weight); // Hemispheres MSG_DEBUG("Calculating hemisphere variables"); const Hemispheres& hemi = apply(e, "Hemispheres"); _histHeavyJetMass->fill(hemi.scaledM2high(), weight); // Iterate over all the charged final state particles. double Evis = 0.0; double rapt05 = 0.; double rapt10 = 0.; double rapt15 = 0.; double rapt20 = 0.; //int numChParticles = 0; MSG_DEBUG("About to iterate over charged FS particles"); foreach (const Particle& p, fs.particles()) { // Get momentum and energy of each particle. 
const Vector3 mom3 = p.p3(); const double energy = p.E(); Evis += energy; _numChParticles += weight; // Scaled momenta. const double mom = mom3.mod(); const double scaledMom = mom/meanBeamMom; const double logInvScaledMom = -std::log(scaledMom); _histLogScaledMom->fill(logInvScaledMom, weight); _histScaledMom->fill(scaledMom, weight); // Get momenta components w.r.t. thrust and sphericity. const double momT = dot(thrust.thrustAxis(), mom3); const double pTinS = dot(mom3, sphericity.sphericityMajorAxis()); const double pToutS = dot(mom3, sphericity.sphericityMinorAxis()); _histPtSIn->fill(fabs(pTinS/GeV), weight); _histPtSOut->fill(fabs(pToutS/GeV), weight); // Calculate rapidities w.r.t. thrust. const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT)); _histRapidityT->fill(fabs(rapidityT), weight); if (std::fabs(rapidityT) <= 0.5) { rapt05 += 1.0; } if (std::fabs(rapidityT) <= 1.0) { rapt10 += 1.0; } if (std::fabs(rapidityT) <= 1.5) { rapt15 += 1.0; } if (std::fabs(rapidityT) <= 2.0) { rapt20 += 1.0; } } _histChMult->fill(numParticles, weight); _histMeanChMultRapt05->fill(_histMeanChMultRapt05->bin(0).xMid(), rapt05 * weight); _histMeanChMultRapt10->fill(_histMeanChMultRapt10->bin(0).xMid(), rapt10 * weight); _histMeanChMultRapt15->fill(_histMeanChMultRapt15->bin(0).xMid(), rapt15 * weight); _histMeanChMultRapt20->fill(_histMeanChMultRapt20->bin(0).xMid(), rapt20 * weight); _histMeanChMult->fill(_histMeanChMult->bin(0).xMid(), numParticles*weight); //// Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); for (Particles::const_iterator p = ufs.particles().begin(); p != ufs.particles().end(); ++p) { const Vector3 mom3 = p->momentum().p3(); int id = abs(p->pid()); const double mom = mom3.mod(); const double energy = p->momentum().E(); const double scaledMom = mom/meanBeamMom; const double scaledEnergy = energy/meanBeamMom; // meanBeamMom is 
approximately beam energy switch (id) { case 22: _histMultiPhoton->fill(-1.*std::log(scaledMom), weight); _weightedTotalNumPhoton += weight; break; case -321: case 321: _weightedTotalNumKPlus += weight; _histMultiKPlus->fill(scaledMom, weight); break; case 211: case -211: _histMultiPiPlus->fill(scaledMom, weight); _weightedTotalNumPiPlus += weight; break; case 2212: case -2212: _histMultiP->fill(scaledMom, weight); _weightedTotalNumP += weight; break; case 111: _histMultiPi0->fill(scaledMom, weight); _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); _weightedTotalNumPi0 += weight; break; case 221: if (scaledMom >= 0.1) { _histMultiEta->fill(scaledEnergy, weight); _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); _weightedTotalNumEta += weight; } break; case 331: if (scaledMom >= 0.1) { _histMultiEtaPrime->fill(scaledEnergy, weight); _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); _weightedTotalNumEtaPrime += weight; } break; case 130: //klong case 310: //kshort _histMultiK0->fill(scaledMom, weight); _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); _weightedTotalNumK0 += weight; break; case 113: _histMultiRho->fill(scaledMom, weight); _histMeanMultiRho->fill(_histMeanMultiRho->bin(0).xMid(), weight); _weightedTotalNumRho += weight; break; case 223: _histMultiOmega782->fill(scaledMom, weight); _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight); _weightedTotalNumOmega782 += weight; break; case 333: _histMultiPhi->fill(scaledMom, weight); _histMeanMultiPhi->fill(_histMeanMultiPhi->bin(0).xMid(), weight); _weightedTotalNumPhi += weight; break; case 313: case -313: _histMultiKStar892_0->fill(scaledMom, weight); _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); _weightedTotalNumKStar892_0 += weight; break; case 323: case -323: _histMultiKStar892Plus->fill(scaledEnergy, weight); 
_histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); _weightedTotalNumKStar892Plus += weight; break; case 3122: case -3122: _histMultiLambda0->fill(scaledMom, weight); _histMeanMultiLambda0->fill(_histMeanMultiLambda0->bin(0).xMid(), weight); _weightedTotalNumLambda0 += weight; break; case 3212: case -3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight); break; case 3312: case -3312: _histMultiXiMinus->fill(scaledEnergy, weight); _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); _weightedTotalNumXiMinus += weight; break; case 3114: case -3114: case 3224: case -3224: _histMultiSigma1385Plus->fill(scaledEnergy, weight); _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _weightedTotalNumSigma1385Plus += weight; break; case 3324: case -3324: _histMultiXi1530_0->fill(scaledEnergy, weight); _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight); _weightedTotalNumXi1530_0 += weight; break; case 3334: _histMeanMultiOmegaOmegaBar->fill(_histMeanMultiOmegaOmegaBar->bin(0).xMid(), weight); break; } } } /// Finalize void finalize() { // Normalize inclusive single particle distributions to the average number // of charged particles per event. 
const double avgNumParts = _weightedTotalPartNum / sumOfWeights(); normalize(_histPtSIn, avgNumParts); normalize(_histPtSOut, avgNumParts); normalize(_histRapidityT, avgNumParts); normalize(_histY3); normalize(_histLogScaledMom, avgNumParts); normalize(_histScaledMom, avgNumParts); // particle spectra scale(_histMultiPiPlus ,1./sumOfWeights()); scale(_histMultiKPlus ,1./sumOfWeights()); scale(_histMultiP ,1./sumOfWeights()); scale(_histMultiPhoton ,1./sumOfWeights()); scale(_histMultiPi0 ,1./sumOfWeights()); scale(_histMultiEta ,1./sumOfWeights()); scale(_histMultiEtaPrime ,1./sumOfWeights()); scale(_histMultiK0 ,1./sumOfWeights()); scale(_histMultiLambda0 ,1./sumOfWeights()); scale(_histMultiXiMinus ,1./sumOfWeights()); scale(_histMultiSigma1385Plus ,1./sumOfWeights()); scale(_histMultiXi1530_0 ,1./sumOfWeights()); scale(_histMultiRho ,1./sumOfWeights()); scale(_histMultiOmega782 ,1./sumOfWeights()); scale(_histMultiKStar892_0 ,1./sumOfWeights()); scale(_histMultiPhi ,1./sumOfWeights()); scale(_histMultiKStar892Plus ,1./sumOfWeights()); //normalize(_histMultiPiPlus ,_weightedTotalNumPiPlus / sumOfWeights()); //normalize(_histMultiKPlus ,_weightedTotalNumKPlus/sumOfWeights()); //normalize(_histMultiP ,_weightedTotalNumP/sumOfWeights()); //normalize(_histMultiPhoton ,_weightedTotalNumPhoton/sumOfWeights()); //normalize(_histMultiPi0 ,_weightedTotalNumPi0/sumOfWeights()); //normalize(_histMultiEta ,_weightedTotalNumEta/sumOfWeights()); //normalize(_histMultiEtaPrime ,_weightedTotalNumEtaPrime/sumOfWeights()); //normalize(_histMultiK0 ,_weightedTotalNumK0/sumOfWeights()); //normalize(_histMultiLambda0 ,_weightedTotalNumLambda0/sumOfWeights()); //normalize(_histMultiXiMinus ,_weightedTotalNumXiMinus/sumOfWeights()); //normalize(_histMultiSigma1385Plus ,_weightedTotalNumSigma1385Plus/sumOfWeights()); //normalize(_histMultiXi1530_0 ,_weightedTotalNumXi1530_0 /sumOfWeights()); //normalize(_histMultiRho ,_weightedTotalNumRho/sumOfWeights()); 
//normalize(_histMultiOmegaMinus ,_weightedTotalNumOmegaMinus/sumOfWeights()); //normalize(_histMultiKStar892_0 ,_weightedTotalNumKStar892_0/sumOfWeights()); //normalize(_histMultiPhi ,_weightedTotalNumPhi/sumOfWeights()); //normalize(_histMultiKStar892Plus ,_weightedTotalNumKStar892Plus/sumOfWeights()); // event shape normalize(_hist1MinusT); normalize(_histTMinor); normalize(_histOblateness); normalize(_histSphericity); normalize(_histAplanarity); normalize(_histHeavyJetMass); normalize(_histCParam); // mean multiplicities scale(_histChMult , 2.0/sumOfWeights()); // taking into account the binwidth of 2 scale(_histMeanChMult , 1.0/sumOfWeights()); scale(_histMeanChMultRapt05 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt10 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt15 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt20 , 1.0/sumOfWeights()); scale(_histMeanMultiPi0 , 1.0/sumOfWeights()); scale(_histMeanMultiEta , 1.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights()); scale(_histMeanMultiK0 , 1.0/sumOfWeights()); scale(_histMeanMultiRho , 1.0/sumOfWeights()); scale(_histMeanMultiOmega782 , 1.0/sumOfWeights()); scale(_histMeanMultiPhi , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights()); scale(_histMeanMultiLambda0 , 1.0/sumOfWeights()); scale(_histMeanMultiSigma0 , 1.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus, 1.0/sumOfWeights()); scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmegaOmegaBar, 1.0/sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
double _weightedTotalPartNum; double _weightedTotalNumPiPlus; double _weightedTotalNumKPlus; double _weightedTotalNumP; double _weightedTotalNumPhoton; double _weightedTotalNumPi0; double _weightedTotalNumEta; double _weightedTotalNumEtaPrime; double _weightedTotalNumK0; double _weightedTotalNumLambda0; double _weightedTotalNumXiMinus; double _weightedTotalNumSigma1385Plus; double _weightedTotalNumXi1530_0; double _weightedTotalNumRho; double _weightedTotalNumOmega782; double _weightedTotalNumKStar892_0; double _weightedTotalNumPhi; double _weightedTotalNumKStar892Plus; double _numChParticles; /// @name Histograms //@{ Histo1DPtr _histSphericity; Histo1DPtr _histAplanarity; Histo1DPtr _hist1MinusT; Histo1DPtr _histTMinor; Histo1DPtr _histY3; Histo1DPtr _histHeavyJetMass; Histo1DPtr _histCParam; Histo1DPtr _histOblateness; Histo1DPtr _histScaledMom; Histo1DPtr _histRapidityT; Histo1DPtr _histPtSIn; Histo1DPtr _histPtSOut; Histo1DPtr _histJetRate2Durham; Histo1DPtr _histJetRate3Durham; Histo1DPtr _histJetRate4Durham; Histo1DPtr _histJetRate5Durham; Histo1DPtr _histLogScaledMom; Histo1DPtr _histChMult; Histo1DPtr _histMultiPiPlus; Histo1DPtr _histMultiKPlus; Histo1DPtr _histMultiP; Histo1DPtr _histMultiPhoton; Histo1DPtr _histMultiPi0; Histo1DPtr _histMultiEta; Histo1DPtr _histMultiEtaPrime; Histo1DPtr _histMultiK0; Histo1DPtr _histMultiLambda0; Histo1DPtr _histMultiXiMinus; Histo1DPtr _histMultiSigma1385Plus; Histo1DPtr _histMultiXi1530_0; Histo1DPtr _histMultiRho; Histo1DPtr _histMultiOmega782; Histo1DPtr _histMultiKStar892_0; Histo1DPtr _histMultiPhi; Histo1DPtr _histMultiKStar892Plus; // mean multiplicities Histo1DPtr _histMeanChMult; Histo1DPtr _histMeanChMultRapt05; Histo1DPtr _histMeanChMultRapt10; Histo1DPtr _histMeanChMultRapt15; Histo1DPtr _histMeanChMultRapt20; Histo1DPtr _histMeanMultiPi0; Histo1DPtr _histMeanMultiEta; Histo1DPtr _histMeanMultiEtaPrime; Histo1DPtr _histMeanMultiK0; Histo1DPtr _histMeanMultiRho; Histo1DPtr _histMeanMultiOmega782; Histo1DPtr 
_histMeanMultiPhi; Histo1DPtr _histMeanMultiKStar892Plus; Histo1DPtr _histMeanMultiKStar892_0; Histo1DPtr _histMeanMultiLambda0; Histo1DPtr _histMeanMultiSigma0; Histo1DPtr _histMeanMultiXiMinus; Histo1DPtr _histMeanMultiSigma1385Plus; Histo1DPtr _histMeanMultiXi1530_0; Histo1DPtr _histMeanMultiOmegaOmegaBar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_1996_S3486095); } diff --git a/analyses/pluginLEP/ALEPH_1999_S4193598.cc b/analyses/pluginLEP/ALEPH_1999_S4193598.cc --- a/analyses/pluginLEP/ALEPH_1999_S4193598.cc +++ b/analyses/pluginLEP/ALEPH_1999_S4193598.cc @@ -1,78 +1,78 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class ALEPH_1999_S4193598 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ALEPH_1999_S4193598() : Analysis("ALEPH_1999_S4193598") { } //@} public: /// Book histograms and initialise projections before the run void init() { declare(Beam(), "Beams"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); declare(ChargedFinalState(), "CFS"); _h_Xe_Ds = bookHisto1D(1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // Trigger condition const ChargedFinalState& cfs = apply(event, "CFS"); if (cfs.size() < 5) vetoEvent; - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); // Get beams and average beam momentum const ParticlePair& beams = apply(event, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0/GeV; // Accept all D*+- decays. Normalisation to data in finalize for (const Particle& p : filter_select(ufs.particles(), Cuts::abspid==PID::DSTARPLUS)) { // Scaled energy. 
const double energy = p.E()/GeV;
        const double scaledEnergy = energy/meanBeamMom;
        _h_Xe_Ds->fill(scaledEnergy, weight);
      }
    }


    /// Normalise histograms etc., after the run
    void finalize() {
      // Normalize to data integral
      normalize(_h_Xe_Ds, 0.00498);
    }


  private:

    Histo1DPtr _h_Xe_Ds;

  };


  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(ALEPH_1999_S4193598);

}
diff --git a/analyses/pluginLEP/ALEPH_2002_S4823664.cc b/analyses/pluginLEP/ALEPH_2002_S4823664.cc
--- a/analyses/pluginLEP/ALEPH_2002_S4823664.cc
+++ b/analyses/pluginLEP/ALEPH_2002_S4823664.cc
@@ -1,91 +1,91 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
-#include "Rivet/Projections/UnstableFinalState.hh"
+#include "Rivet/Projections/UnstableParticles.hh"

namespace Rivet {


  /// @brief ALEPH eta/omega fragmentation function paper
  /// @author Peter Richardson
  class ALEPH_2002_S4823664 : public Analysis {
  public:

    /// Constructor
    ALEPH_2002_S4823664()
      : Analysis("ALEPH_2002_S4823664")
    {}


    /// @name Analysis methods
    //@{

    /// Book projections (beams, charged FS, unstable particles) and the x_p histograms.
    void init() {
      declare(Beam(), "Beams");
      declare(ChargedFinalState(), "FS");
-      declare(UnstableFinalState(), "UFS");
+      declare(UnstableParticles(), "UFS");
      _histXpEta   = bookHisto1D( 2, 1, 2);
      _histXpOmega = bookHisto1D( 3, 1, 2);
    }

    /// Per-event analysis: fill eta and omega scaled-momentum spectra.
    void analyze(const Event& e) {
      // First, veto on leptonic events by requiring at least 2 charged FS particles
      const FinalState& fs = apply(e, "FS");
      const size_t numParticles = fs.particles().size();

      // Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
      if (numParticles < 2) {
        MSG_DEBUG("Failed leptonic event cut");
        vetoEvent;
      }
      MSG_DEBUG("Passed leptonic event cut");

      // Get event weight for histo filling
      const double weight = e.weight();

      // Get beams and average beam momentum
      const ParticlePair& beams = apply(e, "Beams").beams();
      const double meanBeamMom = ( beams.first.p3().mod() +
                                   beams.second.p3().mod() ) / 2.0;
      MSG_DEBUG("Avg beam momentum = " << meanBeamMom);

      // Final state of unstable particles to get particle spectra
-      const UnstableFinalState& ufs = apply(e, "UFS");
+      const UnstableParticles& ufs = apply(e, "UFS");

      foreach (const Particle& p, ufs.particles()) {
        if(p.abspid()==221) {
          // eta: scaled momentum x_p = |p|/E_beam
          double xp = p.p3().mod()/meanBeamMom;
          _histXpEta->fill(xp, weight);
        }
        else if(p.abspid()==223) {
          // omega(782): scaled momentum x_p = |p|/E_beam
          double xp = p.p3().mod()/meanBeamMom;
          _histXpOmega->fill(xp, weight);
        }
      }
    }


    /// Finalize: convert to per-event rates.
    void finalize() {
      scale(_histXpEta  , 1./sumOfWeights());
      scale(_histXpOmega, 1./sumOfWeights());
    }

    //@}


  private:

    Histo1DPtr _histXpEta;
    Histo1DPtr _histXpOmega;
    //@}

  };


  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(ALEPH_2002_S4823664);

}
diff --git a/analyses/pluginLEP/ALEPH_2014_I1267648.cc b/analyses/pluginLEP/ALEPH_2014_I1267648.cc
--- a/analyses/pluginLEP/ALEPH_2014_I1267648.cc
+++ b/analyses/pluginLEP/ALEPH_2014_I1267648.cc
@@ -1,142 +1,142 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
-#include "Rivet/Projections/UnstableFinalState.hh"
+#include "Rivet/Projections/UnstableParticles.hh"

namespace Rivet {


  /// @brief ALEPH hadronic-mass spectra in tau -> (pions + nu_tau) decays
  class ALEPH_2014_I1267648 : public Analysis {
  public:

    /// Constructor
    DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_2014_I1267648);


    /// @name Analysis methods
    //@{

    /// Book histograms and initialise projections before the run
    void init() {
      // Initialise and register projections
-      declare(UnstableFinalState(), "UFS");
+      declare(UnstableParticles(), "UFS");

      // Book histograms
      _h_pip0  = bookHisto1D(1, 1, 1);
      _h_pi2p0 = bookHisto1D(2, 1, 1);
      _h_pi3p0 = bookHisto1D(3, 1, 1);
      _h_3pi =
bookHisto1D(4, 1, 1);
      _h_3pip0 = bookHisto1D(5, 1, 1);
    }

    // Helper function to look for specific decays
    // (true iff `mother`'s non-photon children match the PIDs in `ids` exactly, up to ordering)
    bool isSpecificDecay(const Particle& mother, vector ids) {
      // Trivial check to ignore any other decays but the one in question modulo photons
      const Particles children = mother.children(Cuts::pid!=PID::PHOTON);
      if (children.size()!=ids.size()) return false;
      // Specific bits for tau -> pi decays
      unsigned int n_pi0(0), n_piplus(0), n_piminus(0), n_nutau(0), n_nutaubar(0);
      for (int id : ids) {
        if      (id == PID::PI0)       n_pi0++;
        else if (id == PID::PIPLUS)    n_piplus++;
        else if (id == PID::PIMINUS)   n_piminus++;
        else if (id == PID::NU_TAU)    n_nutau++;
        else if (id == PID::NU_TAUBAR) n_nutaubar++;
      }
      // Check for the explicit decay -- easy as we only deal with pi0 and pi+/-
      if ( count(children, hasPID(PID::PI0))       != n_pi0      ) return false;
      if ( count(children, hasPID(PID::PIPLUS))    != n_piplus   ) return false;
      if ( count(children, hasPID(PID::PIMINUS))   != n_piminus  ) return false;
      if ( count(children, hasPID(PID::NU_TAU))    != n_nutau    ) return false;
      if ( count(children, hasPID(PID::NU_TAUBAR)) != n_nutaubar ) return false;
      return true;
    }

    // Convenience function to get m2 of sum of all hadronic tau decay product 4-vectors
    double hadronicm2(const Particle& mother) {
      FourMomentum p_tot(0,0,0,0);
      // Iterate over all children that are mesons
      for (const Particle & meson : filter_select(mother.children(), isMeson)) {
        // Add this mesons 4-momentum to total 4-momentum
        p_tot += meson.momentum();
      }
      return p_tot.mass2();
    }

    /// Perform the per-event analysis
    void analyze(const Event& event) {
      // Loop over taus
-      for(const Particle& tau : apply(event, "UFS").particles(Cuts::abspid==PID::TAU)) {
+      for(const Particle& tau : apply(event, "UFS").particles(Cuts::abspid==PID::TAU)) {
        // tau -> pi pi0 nu_tau (both charges)
        if (isSpecificDecay(tau, {PID::PIPLUS, PID::PI0, PID::NU_TAUBAR}) ||
            isSpecificDecay(tau, {PID::PIMINUS, PID::PI0, PID::NU_TAU}) ) {
          _h_pip0->fill(hadronicm2(tau), event.weight());
        }
        // tau -> pi pi0 pi0 nu_tau (both charges)
        else if (isSpecificDecay(tau, {PID::PIPLUS, PID::PI0, PID::PI0, PID::NU_TAUBAR}) ||
                 isSpecificDecay(tau, {PID::PIMINUS, PID::PI0, PID::PI0, PID::NU_TAU}) ) {
          _h_pi2p0->fill(hadronicm2(tau), event.weight());
        }
        // tau -> pi pi0 pi0 pi0 (3,1,1)
        else if (isSpecificDecay(tau, {PID::PIPLUS, PID::PI0, PID::PI0, PID::PI0, PID::NU_TAUBAR}) ||
                 isSpecificDecay(tau, {PID::PIMINUS, PID::PI0, PID::PI0, PID::PI0, PID::NU_TAU}) ) {
          _h_pi3p0->fill(hadronicm2(tau), event.weight());
        }
        // tau -> 3 charged pions (4,1,1)
        else if (isSpecificDecay(tau, {PID::PIPLUS, PID::PIPLUS, PID::PIMINUS, PID::NU_TAUBAR}) ||
                 isSpecificDecay(tau, {PID::PIMINUS, PID::PIMINUS, PID::PIPLUS, PID::NU_TAU}) ) {
          _h_3pi->fill(hadronicm2(tau), event.weight());
        }
        // tau -> 3 charged pions + pi0 (5,1,1)
        else if (isSpecificDecay(tau, {PID::PIPLUS, PID::PIPLUS, PID::PIMINUS, PID::PI0, PID::NU_TAUBAR}) ||
                 isSpecificDecay(tau, {PID::PIMINUS, PID::PIMINUS, PID::PIPLUS, PID::PI0, PID::NU_TAU}) ) {
          _h_3pip0->fill(hadronicm2(tau), event.weight());
        }
        //
      }
    }


    /// Normalise histograms etc., after the run
    void finalize() {
      normalize(_h_pip0);  // normalize to unity
      normalize(_h_pi2p0); // normalize to unity
      normalize(_h_pi3p0); // normalize to unity
      normalize(_h_3pi);   // normalize to unity
      normalize(_h_3pip0); // normalize to unity
    }

    //@}


  private:

    /// @name Histograms
    //@{
    Histo1DPtr _h_pip0;
    Histo1DPtr _h_pi2p0;
    Histo1DPtr _h_pi3p0;
    Histo1DPtr _h_3pi;
    Histo1DPtr _h_3pip0;
    //@}

  };


  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(ALEPH_2014_I1267648);

}
diff --git a/analyses/pluginLEP/DELPHI_1995_S3137023.cc b/analyses/pluginLEP/DELPHI_1995_S3137023.cc
--- a/analyses/pluginLEP/DELPHI_1995_S3137023.cc
+++ b/analyses/pluginLEP/DELPHI_1995_S3137023.cc
@@ -1,107 +1,107 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
-#include
"Rivet/Projections/UnstableFinalState.hh"
+#include "Rivet/Projections/UnstableParticles.hh"

namespace Rivet {


  /// @brief DELPHI strange baryon paper
  /// @author Hendrik Hoeth
  class DELPHI_1995_S3137023 : public Analysis {
  public:

    /// Constructor
    DELPHI_1995_S3137023()
      : Analysis("DELPHI_1995_S3137023")
    {
      // Zero the weighted candidate counters used for normalisation in finalize()
      _weightedTotalNumXiMinus = 0;
      _weightedTotalNumSigma1385Plus = 0;
    }


    /// @name Analysis methods
    //@{

    /// Book projections and the x_p histograms for Xi- and Sigma(1385)+/-.
    void init() {
      declare(Beam(), "Beams");
      declare(ChargedFinalState(), "FS");
-      declare(UnstableFinalState(), "UFS");
+      declare(UnstableParticles(), "UFS");
      _histXpXiMinus       = bookHisto1D(2, 1, 1);
      _histXpSigma1385Plus = bookHisto1D(3, 1, 1);
    }

    /// Per-event analysis: fill strange-baryon scaled-momentum spectra.
    void analyze(const Event& e) {
      // First, veto on leptonic events by requiring at least 2 charged FS particles
      const FinalState& fs = apply(e, "FS");
      const size_t numParticles = fs.particles().size();

      // Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
      if (numParticles < 2) {
        MSG_DEBUG("Failed leptonic event cut");
        vetoEvent;
      }
      MSG_DEBUG("Passed leptonic event cut");

      // Get event weight for histo filling
      const double weight = e.weight();

      // Get beams and average beam momentum
      const ParticlePair& beams = apply(e, "Beams").beams();
      const double meanBeamMom = ( beams.first.p3().mod() +
                                   beams.second.p3().mod() ) / 2.0;
      MSG_DEBUG("Avg beam momentum = " << meanBeamMom);

      // Final state of unstable particles to get particle spectra
-      const UnstableFinalState& ufs = apply(e, "UFS");
+      const UnstableParticles& ufs = apply(e, "UFS");

      foreach (const Particle& p, ufs.particles()) {
        const int id = p.abspid();
        switch (id) {
        case 3312:  // Xi- (charge conjugate included via abspid)
          _histXpXiMinus->fill(p.p3().mod()/meanBeamMom, weight);
          _weightedTotalNumXiMinus += weight;
          break;
        case 3114: case 3224:  // Sigma(1385)-/+ (charge conjugates included via abspid)
          _histXpSigma1385Plus->fill(p.p3().mod()/meanBeamMom, weight);
          _weightedTotalNumSigma1385Plus += weight;
          break;
        }
      }
    }

    /// Finalize: normalise spectra to the weighted candidate rates.
    void finalize() {
      normalize(_histXpXiMinus       , _weightedTotalNumXiMinus/sumOfWeights());
      normalize(_histXpSigma1385Plus ,
_weightedTotalNumSigma1385Plus/sumOfWeights());
    }

    //@}


  private:

    /// Weighted candidate counts for Xi- and Sigma(1385)+/- — used to
    /// normalise the corresponding x_p spectra in finalize().
    double _weightedTotalNumXiMinus;
    double _weightedTotalNumSigma1385Plus;

    Histo1DPtr _histXpXiMinus;
    Histo1DPtr _histXpSigma1385Plus;
    //@}

  };


  // The hook for the plugin system
  DECLARE_RIVET_PLUGIN(DELPHI_1995_S3137023);

}
diff --git a/analyses/pluginLEP/DELPHI_1996_S3430090.cc b/analyses/pluginLEP/DELPHI_1996_S3430090.cc
--- a/analyses/pluginLEP/DELPHI_1996_S3430090.cc
+++ b/analyses/pluginLEP/DELPHI_1996_S3430090.cc
@@ -1,552 +1,552 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
-#include "Rivet/Projections/UnstableFinalState.hh"
+#include "Rivet/Projections/UnstableParticles.hh"

namespace Rivet {


  /**
   * @brief DELPHI event shapes and identified particle spectra
   * @author Andy Buckley
   * @author Hendrik Hoeth
   *
   * This is the paper which was used for the original PROFESSOR MC tuning
   * study. It studies a wide range of e+ e- event shape variables, differential
   * jet rates in the Durham and JADE schemes, and incorporates identified
   * particle spectra, from other LEP analyses.
   *
   * @par Run conditions
   *
   * @arg LEP1 beam energy: \f$ \sqrt{s} = \f$ 91.2 GeV
   * @arg Run with generic QCD events.
* @arg No \f$ p_\perp^\text{min} \f$ cutoff is required */ class DELPHI_1996_S3430090 : public Analysis { public: /// Constructor DELPHI_1996_S3430090() : Analysis("DELPHI_1996_S3430090") { _weightedTotalPartNum = 0.0; _passedCutWeightSum = 0.0; _passedCut3WeightSum = 0.0; _passedCut4WeightSum = 0.0; _passedCut5WeightSum = 0.0; } /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); // Don't try to introduce a pT or eta cut here. It's all corrected // back. (See Section 2 of the paper.) const ChargedFinalState cfs; declare(cfs, "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); declare(FastJets(cfs, FastJets::JADE, 0.7), "JadeJets"); declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(cfs), "Sphericity"); declare(ParisiTensor(cfs), "Parisi"); const Thrust thrust(cfs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); _histPtTIn = bookHisto1D(1, 1, 1); _histPtTOut = bookHisto1D(2, 1, 1); _histPtSIn = bookHisto1D(3, 1, 1); _histPtSOut = bookHisto1D(4, 1, 1); _histRapidityT = bookHisto1D(5, 1, 1); _histRapidityS = bookHisto1D(6, 1, 1); _histScaledMom = bookHisto1D(7, 1, 1); _histLogScaledMom = bookHisto1D(8, 1, 1); _histPtTOutVsXp = bookProfile1D(9, 1, 1); _histPtVsXp = bookProfile1D(10, 1, 1); _hist1MinusT = bookHisto1D(11, 1, 1); _histTMajor = bookHisto1D(12, 1, 1); _histTMinor = bookHisto1D(13, 1, 1); _histOblateness = bookHisto1D(14, 1, 1); _histSphericity = bookHisto1D(15, 1, 1); _histAplanarity = bookHisto1D(16, 1, 1); _histPlanarity = bookHisto1D(17, 1, 1); _histCParam = bookHisto1D(18, 1, 1); _histDParam = bookHisto1D(19, 1, 1); _histHemiMassH = bookHisto1D(20, 1, 1); _histHemiMassL = bookHisto1D(21, 1, 1); _histHemiMassD = bookHisto1D(22, 1, 1); _histHemiBroadW = bookHisto1D(23, 1, 1); _histHemiBroadN = bookHisto1D(24, 1, 1); _histHemiBroadT = bookHisto1D(25, 1, 1); _histHemiBroadD = bookHisto1D(26, 1, 1); // Binned in y_cut _histDiffRate2Durham = 
bookHisto1D(27, 1, 1); _histDiffRate2Jade = bookHisto1D(28, 1, 1); _histDiffRate3Durham = bookHisto1D(29, 1, 1); _histDiffRate3Jade = bookHisto1D(30, 1, 1); _histDiffRate4Durham = bookHisto1D(31, 1, 1); _histDiffRate4Jade = bookHisto1D(32, 1, 1); // Binned in cos(chi) _histEEC = bookHisto1D(33, 1, 1); _histAEEC = bookHisto1D(34, 1, 1); _histMultiCharged = bookHisto1D(35, 1, 1); _histMultiPiPlus = bookHisto1D(36, 1, 1); _histMultiPi0 = bookHisto1D(36, 1, 2); _histMultiKPlus = bookHisto1D(36, 1, 3); _histMultiK0 = bookHisto1D(36, 1, 4); _histMultiEta = bookHisto1D(36, 1, 5); _histMultiEtaPrime = bookHisto1D(36, 1, 6); _histMultiDPlus = bookHisto1D(36, 1, 7); _histMultiD0 = bookHisto1D(36, 1, 8); _histMultiBPlus0 = bookHisto1D(36, 1, 9); _histMultiF0 = bookHisto1D(37, 1, 1); _histMultiRho = bookHisto1D(38, 1, 1); _histMultiKStar892Plus = bookHisto1D(38, 1, 2); _histMultiKStar892_0 = bookHisto1D(38, 1, 3); _histMultiPhi = bookHisto1D(38, 1, 4); _histMultiDStar2010Plus = bookHisto1D(38, 1, 5); _histMultiF2 = bookHisto1D(39, 1, 1); _histMultiK2Star1430_0 = bookHisto1D(39, 1, 2); _histMultiP = bookHisto1D(40, 1, 1); _histMultiLambda0 = bookHisto1D(40, 1, 2); _histMultiXiMinus = bookHisto1D(40, 1, 3); _histMultiOmegaMinus = bookHisto1D(40, 1, 4); _histMultiDeltaPlusPlus = bookHisto1D(40, 1, 5); _histMultiSigma1385Plus = bookHisto1D(40, 1, 6); _histMultiXi1530_0 = bookHisto1D(40, 1, 7); _histMultiLambdaB0 = bookHisto1D(40, 1, 8); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); const double weight = e.weight(); _passedCutWeightSum += weight; _weightedTotalPartNum += numParticles * weight; // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Thrusts MSG_DEBUG("Calculating thrust"); const Thrust& thrust = apply(e, "Thrust"); _hist1MinusT->fill(1 - thrust.thrust(), weight); _histTMajor->fill(thrust.thrustMajor(), weight); _histTMinor->fill(thrust.thrustMinor(), weight); _histOblateness->fill(thrust.oblateness(), weight); // Jets const FastJets& durjet = apply(e, "DurhamJets"); const FastJets& jadejet = apply(e, "JadeJets"); if (numParticles >= 3) { _passedCut3WeightSum += weight; if (durjet.clusterSeq()) _histDiffRate2Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(2), weight); if (jadejet.clusterSeq()) _histDiffRate2Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(2), weight); } if (numParticles >= 4) { _passedCut4WeightSum += weight; if (durjet.clusterSeq()) _histDiffRate3Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(3), weight); if (jadejet.clusterSeq()) _histDiffRate3Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(3), weight); } if (numParticles >= 5) { _passedCut5WeightSum += weight; if (durjet.clusterSeq()) _histDiffRate4Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(4), weight); if (jadejet.clusterSeq()) _histDiffRate4Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(4), weight); } // Sphericities MSG_DEBUG("Calculating sphericity"); const Sphericity& sphericity = apply(e, "Sphericity"); _histSphericity->fill(sphericity.sphericity(), weight); _histAplanarity->fill(sphericity.aplanarity(), weight); _histPlanarity->fill(sphericity.planarity(), weight); // C & D params MSG_DEBUG("Calculating Parisi 
params"); const ParisiTensor& parisi = apply(e, "Parisi"); _histCParam->fill(parisi.C(), weight); _histDParam->fill(parisi.D(), weight); // Hemispheres MSG_DEBUG("Calculating hemisphere variables"); const Hemispheres& hemi = apply(e, "Hemispheres"); _histHemiMassH->fill(hemi.scaledM2high(), weight); _histHemiMassL->fill(hemi.scaledM2low(), weight); _histHemiMassD->fill(hemi.scaledM2diff(), weight); _histHemiBroadW->fill(hemi.Bmax(), weight); _histHemiBroadN->fill(hemi.Bmin(), weight); _histHemiBroadT->fill(hemi.Bsum(), weight); _histHemiBroadD->fill(hemi.Bdiff(), weight); // Iterate over all the charged final state particles. double Evis = 0.0; double Evis2 = 0.0; MSG_DEBUG("About to iterate over charged FS particles"); foreach (const Particle& p, fs.particles()) { // Get momentum and energy of each particle. const Vector3 mom3 = p.p3(); const double energy = p.E(); Evis += energy; // Scaled momenta. const double mom = mom3.mod(); const double scaledMom = mom/meanBeamMom; const double logInvScaledMom = -std::log(scaledMom); _histLogScaledMom->fill(logInvScaledMom, weight); _histScaledMom->fill(scaledMom, weight); // Get momenta components w.r.t. thrust and sphericity. const double momT = dot(thrust.thrustAxis(), mom3); const double momS = dot(sphericity.sphericityAxis(), mom3); const double pTinT = dot(mom3, thrust.thrustMajorAxis()); const double pToutT = dot(mom3, thrust.thrustMinorAxis()); const double pTinS = dot(mom3, sphericity.sphericityMajorAxis()); const double pToutS = dot(mom3, sphericity.sphericityMinorAxis()); const double pT = sqrt(pow(pTinT, 2) + pow(pToutT, 2)); _histPtTIn->fill(fabs(pTinT/GeV), weight); _histPtTOut->fill(fabs(pToutT/GeV), weight); _histPtSIn->fill(fabs(pTinS/GeV), weight); _histPtSOut->fill(fabs(pToutS/GeV), weight); _histPtVsXp->fill(scaledMom, fabs(pT/GeV), weight); _histPtTOutVsXp->fill(scaledMom, fabs(pToutT/GeV), weight); // Calculate rapidities w.r.t. thrust and sphericity. 
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT)); const double rapidityS = 0.5 * std::log((energy + momS) / (energy - momS)); _histRapidityT->fill(fabs(rapidityT), weight); _histRapidityS->fill(fabs(rapidityS), weight); MSG_TRACE(fabs(rapidityT) << " " << scaledMom/GeV); } Evis2 = Evis*Evis; // (A)EEC // Need iterators since second loop starts at current outer loop iterator, i.e. no "foreach" here! for (Particles::const_iterator p_i = fs.particles().begin(); p_i != fs.particles().end(); ++p_i) { for (Particles::const_iterator p_j = p_i; p_j != fs.particles().end(); ++p_j) { if (p_i == p_j) continue; const Vector3 mom3_i = p_i->momentum().p3(); const Vector3 mom3_j = p_j->momentum().p3(); const double energy_i = p_i->momentum().E(); const double energy_j = p_j->momentum().E(); const double cosij = dot(mom3_i.unit(), mom3_j.unit()); const double eec = (energy_i*energy_j) / Evis2; _histEEC->fill(cosij, eec*weight); if (cosij < 0) _histAEEC->fill( cosij, eec*weight); else _histAEEC->fill(-cosij, -eec*weight); } } _histMultiCharged->fill(_histMultiCharged->bin(0).xMid(), numParticles*weight); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { int id = p.abspid(); switch (id) { case 211: _histMultiPiPlus->fill(_histMultiPiPlus->bin(0).xMid(), weight); break; case 111: _histMultiPi0->fill(_histMultiPi0->bin(0).xMid(), weight); break; case 321: _histMultiKPlus->fill(_histMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMultiK0->fill(_histMultiK0->bin(0).xMid(), weight); break; case 221: _histMultiEta->fill(_histMultiEta->bin(0).xMid(), weight); break; case 331: _histMultiEtaPrime->fill(_histMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMultiDPlus->fill(_histMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMultiD0->fill(_histMultiD0->bin(0).xMid(), 
weight); break; case 511: case 521: case 531: _histMultiBPlus0->fill(_histMultiBPlus0->bin(0).xMid(), weight); break; case 9010221: _histMultiF0->fill(_histMultiF0->bin(0).xMid(), weight); break; case 113: _histMultiRho->fill(_histMultiRho->bin(0).xMid(), weight); break; case 323: _histMultiKStar892Plus->fill(_histMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMultiKStar892_0->fill(_histMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMultiPhi->fill(_histMultiPhi->bin(0).xMid(), weight); break; case 413: _histMultiDStar2010Plus->fill(_histMultiDStar2010Plus->bin(0).xMid(), weight); break; case 225: _histMultiF2->fill(_histMultiF2->bin(0).xMid(), weight); break; case 315: _histMultiK2Star1430_0->fill(_histMultiK2Star1430_0->bin(0).xMid(), weight); break; case 2212: _histMultiP->fill(_histMultiP->bin(0).xMid(), weight); break; case 3122: _histMultiLambda0->fill(_histMultiLambda0->bin(0).xMid(), weight); break; case 3312: _histMultiXiMinus->fill(_histMultiXiMinus->bin(0).xMid(), weight); break; case 3334: _histMultiOmegaMinus->fill(_histMultiOmegaMinus->bin(0).xMid(), weight); break; case 2224: _histMultiDeltaPlusPlus->fill(_histMultiDeltaPlusPlus->bin(0).xMid(), weight); break; case 3114: _histMultiSigma1385Plus->fill(_histMultiSigma1385Plus->bin(0).xMid(), weight); break; case 3324: _histMultiXi1530_0->fill(_histMultiXi1530_0->bin(0).xMid(), weight); break; case 5122: _histMultiLambdaB0->fill(_histMultiLambdaB0->bin(0).xMid(), weight); break; } } } // Finalize void finalize() { // Normalize inclusive single particle distributions to the average number // of charged particles per event. 
const double avgNumParts = _weightedTotalPartNum / _passedCutWeightSum; normalize(_histPtTIn, avgNumParts); normalize(_histPtTOut, avgNumParts); normalize(_histPtSIn, avgNumParts); normalize(_histPtSOut, avgNumParts); normalize(_histRapidityT, avgNumParts); normalize(_histRapidityS, avgNumParts); normalize(_histLogScaledMom, avgNumParts); normalize(_histScaledMom, avgNumParts); scale(_histEEC, 1.0/_passedCutWeightSum); scale(_histAEEC, 1.0/_passedCutWeightSum); scale(_histMultiCharged, 1.0/_passedCutWeightSum); scale(_histMultiPiPlus, 1.0/_passedCutWeightSum); scale(_histMultiPi0, 1.0/_passedCutWeightSum); scale(_histMultiKPlus, 1.0/_passedCutWeightSum); scale(_histMultiK0, 1.0/_passedCutWeightSum); scale(_histMultiEta, 1.0/_passedCutWeightSum); scale(_histMultiEtaPrime, 1.0/_passedCutWeightSum); scale(_histMultiDPlus, 1.0/_passedCutWeightSum); scale(_histMultiD0, 1.0/_passedCutWeightSum); scale(_histMultiBPlus0, 1.0/_passedCutWeightSum); scale(_histMultiF0, 1.0/_passedCutWeightSum); scale(_histMultiRho, 1.0/_passedCutWeightSum); scale(_histMultiKStar892Plus, 1.0/_passedCutWeightSum); scale(_histMultiKStar892_0, 1.0/_passedCutWeightSum); scale(_histMultiPhi, 1.0/_passedCutWeightSum); scale(_histMultiDStar2010Plus, 1.0/_passedCutWeightSum); scale(_histMultiF2, 1.0/_passedCutWeightSum); scale(_histMultiK2Star1430_0, 1.0/_passedCutWeightSum); scale(_histMultiP, 1.0/_passedCutWeightSum); scale(_histMultiLambda0, 1.0/_passedCutWeightSum); scale(_histMultiXiMinus, 1.0/_passedCutWeightSum); scale(_histMultiOmegaMinus, 1.0/_passedCutWeightSum); scale(_histMultiDeltaPlusPlus, 1.0/_passedCutWeightSum); scale(_histMultiSigma1385Plus, 1.0/_passedCutWeightSum); scale(_histMultiXi1530_0, 1.0/_passedCutWeightSum); scale(_histMultiLambdaB0, 1.0/_passedCutWeightSum); scale(_hist1MinusT, 1.0/_passedCutWeightSum); scale(_histTMajor, 1.0/_passedCutWeightSum); scale(_histTMinor, 1.0/_passedCutWeightSum); scale(_histOblateness, 1.0/_passedCutWeightSum); scale(_histSphericity, 
1.0/_passedCutWeightSum); scale(_histAplanarity, 1.0/_passedCutWeightSum); scale(_histPlanarity, 1.0/_passedCutWeightSum); scale(_histHemiMassD, 1.0/_passedCutWeightSum); scale(_histHemiMassH, 1.0/_passedCutWeightSum); scale(_histHemiMassL, 1.0/_passedCutWeightSum); scale(_histHemiBroadW, 1.0/_passedCutWeightSum); scale(_histHemiBroadN, 1.0/_passedCutWeightSum); scale(_histHemiBroadT, 1.0/_passedCutWeightSum); scale(_histHemiBroadD, 1.0/_passedCutWeightSum); scale(_histCParam, 1.0/_passedCutWeightSum); scale(_histDParam, 1.0/_passedCutWeightSum); scale(_histDiffRate2Durham, 1.0/_passedCut3WeightSum); scale(_histDiffRate2Jade, 1.0/_passedCut3WeightSum); scale(_histDiffRate3Durham, 1.0/_passedCut4WeightSum); scale(_histDiffRate3Jade, 1.0/_passedCut4WeightSum); scale(_histDiffRate4Durham, 1.0/_passedCut5WeightSum); scale(_histDiffRate4Jade, 1.0/_passedCut5WeightSum); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
double _weightedTotalPartNum; /// @name Sums of weights past various cuts //@{ double _passedCutWeightSum; double _passedCut3WeightSum; double _passedCut4WeightSum; double _passedCut5WeightSum; //@} /// @name Histograms //@{ Histo1DPtr _histPtTIn; Histo1DPtr _histPtTOut; Histo1DPtr _histPtSIn; Histo1DPtr _histPtSOut; Histo1DPtr _histRapidityT; Histo1DPtr _histRapidityS; Histo1DPtr _histScaledMom, _histLogScaledMom; Profile1DPtr _histPtTOutVsXp, _histPtVsXp; Histo1DPtr _hist1MinusT; Histo1DPtr _histTMajor; Histo1DPtr _histTMinor; Histo1DPtr _histOblateness; Histo1DPtr _histSphericity; Histo1DPtr _histAplanarity; Histo1DPtr _histPlanarity; Histo1DPtr _histCParam; Histo1DPtr _histDParam; Histo1DPtr _histHemiMassD; Histo1DPtr _histHemiMassH; Histo1DPtr _histHemiMassL; Histo1DPtr _histHemiBroadW; Histo1DPtr _histHemiBroadN; Histo1DPtr _histHemiBroadT; Histo1DPtr _histHemiBroadD; Histo1DPtr _histDiffRate2Durham; Histo1DPtr _histDiffRate2Jade; Histo1DPtr _histDiffRate3Durham; Histo1DPtr _histDiffRate3Jade; Histo1DPtr _histDiffRate4Durham; Histo1DPtr _histDiffRate4Jade; Histo1DPtr _histEEC, _histAEEC; Histo1DPtr _histMultiCharged; Histo1DPtr _histMultiPiPlus; Histo1DPtr _histMultiPi0; Histo1DPtr _histMultiKPlus; Histo1DPtr _histMultiK0; Histo1DPtr _histMultiEta; Histo1DPtr _histMultiEtaPrime; Histo1DPtr _histMultiDPlus; Histo1DPtr _histMultiD0; Histo1DPtr _histMultiBPlus0; Histo1DPtr _histMultiF0; Histo1DPtr _histMultiRho; Histo1DPtr _histMultiKStar892Plus; Histo1DPtr _histMultiKStar892_0; Histo1DPtr _histMultiPhi; Histo1DPtr _histMultiDStar2010Plus; Histo1DPtr _histMultiF2; Histo1DPtr _histMultiK2Star1430_0; Histo1DPtr _histMultiP; Histo1DPtr _histMultiLambda0; Histo1DPtr _histMultiXiMinus; Histo1DPtr _histMultiOmegaMinus; Histo1DPtr _histMultiDeltaPlusPlus; Histo1DPtr _histMultiSigma1385Plus; Histo1DPtr _histMultiXi1530_0; Histo1DPtr _histMultiLambdaB0; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_1996_S3430090); } diff --git 
a/analyses/pluginLEP/DELPHI_1999_S3960137.cc b/analyses/pluginLEP/DELPHI_1999_S3960137.cc --- a/analyses/pluginLEP/DELPHI_1999_S3960137.cc +++ b/analyses/pluginLEP/DELPHI_1999_S3960137.cc @@ -1,99 +1,99 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief DELPHI rho,f_0 and f_2 fragmentation function paper /// @author Peter Richardson class DELPHI_1999_S3960137 : public Analysis { public: /// Constructor DELPHI_1999_S3960137() : Analysis("DELPHI_1999_S3960137") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXpRho = bookHisto1D( 1, 1, 1); _histXpf0 = bookHisto1D( 1, 1, 2); _histXpf2 = bookHisto1D( 1, 1, 3); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); double xp = p.p3().mod()/meanBeamMom; switch (id) { case 9010221: _histXpf0->fill(xp, weight); break; case 225: _histXpf2->fill(xp, weight); break; case 113: _histXpRho->fill(xp, weight); break; } } } /// Finalize void finalize() { scale(_histXpf0 , 1./sumOfWeights()); scale(_histXpf2 , 1./sumOfWeights()); scale(_histXpRho, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXpf0; Histo1DPtr _histXpf2; Histo1DPtr _histXpRho; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_1999_S3960137); } diff --git a/analyses/pluginLEP/DELPHI_2011_I890503.cc b/analyses/pluginLEP/DELPHI_2011_I890503.cc --- a/analyses/pluginLEP/DELPHI_2011_I890503.cc +++ b/analyses/pluginLEP/DELPHI_2011_I890503.cc @@ -1,85 +1,85 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class DELPHI_2011_I890503 : public Analysis { public: /// Constructor DELPHI_2011_I890503() : Analysis("DELPHI_2011_I890503") { } /// Book projections and histograms void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), 
"UFS"); _histXbweak = bookHisto1D(1, 1, 1); _histMeanXbweak = bookProfile1D(2, 1, 1); } void analyze(const Event& e) { // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (apply(e, "FS").particles().size() < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); // Get Bottom hadrons const Particles bhads = filter_select(ufs.particles(), isBottomHadron); for (const Particle& bhad : bhads) { // Check for weak decay, i.e. no more bottom present in children if (bhad.children(lastParticleWith(hasBottom)).empty()) { const double xp = bhad.E()/meanBeamMom; _histXbweak->fill(xp, weight); _histMeanXbweak->fill(_histMeanXbweak->bin(0).xMid(), xp, weight); } } } // Finalize void finalize() { normalize(_histXbweak); } private: Histo1DPtr _histXbweak; Profile1DPtr _histMeanXbweak; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_2011_I890503); } diff --git a/analyses/pluginLEP/L3_1992_I336180.cc b/analyses/pluginLEP/L3_1992_I336180.cc --- a/analyses/pluginLEP/L3_1992_I336180.cc +++ b/analyses/pluginLEP/L3_1992_I336180.cc @@ -1,90 +1,90 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief L3 inclusive eta production in hadronic Z0 decays /// @author Simone Amoroso class L3_1992_I336180 : public Analysis { public: /// Constructor 
DEFAULT_RIVET_ANALYSIS_CTOR(L3_1992_I336180); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _histXpEta = bookHisto1D( 1, 1, 1); _histLnXpEta = bookHisto1D( 2, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. const FinalState& fs = apply(event, "FS"); if (fs.particles().size() < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); // Get event weight for histo filling const double weight = event.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(event, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const Particles& etas = apply(event, "UFS").particles(Cuts::abspid==PID::ETA); + const Particles& etas = apply(event, "UFS").particles(Cuts::abspid==PID::ETA); foreach (const Particle& p, etas) { double xp = p.p3().mod()/meanBeamMom; MSG_DEBUG("Eta xp = " << xp); _histXpEta->fill(xp, weight); _histLnXpEta->fill(log(1./xp), weight); } } /// Normalise histograms etc., after the run void finalize() { scale(_histXpEta, 1./sumOfWeights()); scale(_histLnXpEta, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXpEta; Histo1DPtr _histLnXpEta; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(L3_1992_I336180); } diff --git a/analyses/pluginLEP/OPAL_1993_I342766.cc b/analyses/pluginLEP/OPAL_1993_I342766.cc --- a/analyses/pluginLEP/OPAL_1993_I342766.cc +++ b/analyses/pluginLEP/OPAL_1993_I342766.cc @@ -1,91 +1,91 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include 
"Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief A Measurement of K*+- (892) production in hadronic Z0 decays /// @author Simone Amoroso class OPAL_1993_I342766 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_1993_I342766); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _histXeKStar892 = bookHisto1D( 1, 1, 1); _histMeanKStar892 = bookHisto1D( 2, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = event.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(event, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); foreach (const Particle& p, ufs.particles(Cuts::abspid==323)) { double xp = p.p3().mod()/meanBeamMom; _histXeKStar892->fill(xp, weight); _histMeanKStar892->fill(_histMeanKStar892->bin(0).xMid(), weight); } } /// Normalise histograms etc., after the run void finalize() { scale(_histXeKStar892, 1./sumOfWeights()); scale(_histMeanKStar892, 1./sumOfWeights()); } //@} private: /// @name Histograms Histo1DPtr _histXeKStar892; Histo1DPtr _histMeanKStar892; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1993_I342766); } diff --git a/analyses/pluginLEP/OPAL_1995_S3198391.cc b/analyses/pluginLEP/OPAL_1995_S3198391.cc --- a/analyses/pluginLEP/OPAL_1995_S3198391.cc +++ b/analyses/pluginLEP/OPAL_1995_S3198391.cc @@ -1,84 +1,84 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL Delta++ fragmentation function paper /// @author Peter Richardson class OPAL_1995_S3198391 : public Analysis { public: /// Constructor OPAL_1995_S3198391() : Analysis("OPAL_1995_S3198391") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + 
declare(UnstableParticles(), "UFS"); _histXpDelta = bookHisto1D( 1, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { if(p.abspid()==2224) { double xp = p.p3().mod()/meanBeamMom; _histXpDelta->fill(xp, weight); } } } /// Finalize void finalize() { scale(_histXpDelta, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXpDelta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1995_S3198391); } diff --git a/analyses/pluginLEP/OPAL_1996_S3257789.cc b/analyses/pluginLEP/OPAL_1996_S3257789.cc --- a/analyses/pluginLEP/OPAL_1996_S3257789.cc +++ b/analyses/pluginLEP/OPAL_1996_S3257789.cc @@ -1,97 +1,97 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL J/Psi fragmentation function paper /// @author Peter Richardson class OPAL_1996_S3257789 : public Analysis { public: /// Constructor OPAL_1996_S3257789() : Analysis("OPAL_1996_S3257789"), 
_weightSum(0.) {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXpJPsi = bookHisto1D( 1, 1, 1); _multJPsi = bookHisto1D( 2, 1, 1); _multPsiPrime = bookHisto1D( 2, 1, 2); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { if(p.abspid()==443) { double xp = p.p3().mod()/meanBeamMom; _histXpJPsi->fill(xp, weight); _multJPsi->fill(91.2,weight); _weightSum += weight; } else if(p.abspid()==100443) { _multPsiPrime->fill(91.2,weight); } } } /// Finalize void finalize() { if(_weightSum>0.) 
scale(_histXpJPsi , 0.1/_weightSum); scale(_multJPsi , 1./sumOfWeights()); scale(_multPsiPrime, 1./sumOfWeights()); } //@} private: double _weightSum; Histo1DPtr _histXpJPsi; Histo1DPtr _multJPsi; Histo1DPtr _multPsiPrime; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1996_S3257789); } diff --git a/analyses/pluginLEP/OPAL_1997_S3396100.cc b/analyses/pluginLEP/OPAL_1997_S3396100.cc --- a/analyses/pluginLEP/OPAL_1997_S3396100.cc +++ b/analyses/pluginLEP/OPAL_1997_S3396100.cc @@ -1,163 +1,163 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL strange baryon paper /// @author Peter Richardson class OPAL_1997_S3396100 : public Analysis { public: /// Constructor OPAL_1997_S3396100() : Analysis("OPAL_1997_S3396100"), _weightedTotalNumLambda(0.) ,_weightedTotalNumXiMinus(0.), _weightedTotalNumSigma1385Plus(0.),_weightedTotalNumSigma1385Minus(0.), _weightedTotalNumXi1530(0.) ,_weightedTotalNumLambda1520(0.) 
{} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXpLambda = bookHisto1D( 1, 1, 1); _histXiLambda = bookHisto1D( 2, 1, 1); _histXpXiMinus = bookHisto1D( 3, 1, 1); _histXiXiMinus = bookHisto1D( 4, 1, 1); _histXpSigma1385Plus = bookHisto1D( 5, 1, 1); _histXiSigma1385Plus = bookHisto1D( 6, 1, 1); _histXpSigma1385Minus = bookHisto1D( 7, 1, 1); _histXiSigma1385Minus = bookHisto1D( 8, 1, 1); _histXpXi1530 = bookHisto1D( 9, 1, 1); _histXiXi1530 = bookHisto1D(10, 1, 1); _histXpLambda1520 = bookHisto1D(11, 1, 1); _histXiLambda1520 = bookHisto1D(12, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); double xp = p.p3().mod()/meanBeamMom; double xi = -log(xp); switch (id) { case 3312: _histXpXiMinus->fill(xp, weight); _histXiXiMinus->fill(xi, weight); _weightedTotalNumXiMinus += weight; break; case 3224: _histXpSigma1385Plus->fill(xp, weight); _histXiSigma1385Plus->fill(xi, weight); _weightedTotalNumSigma1385Plus += weight; break; case 3114: 
_histXpSigma1385Minus->fill(xp, weight); _histXiSigma1385Minus->fill(xi, weight); _weightedTotalNumSigma1385Minus += weight; break; case 3122: _histXpLambda->fill(xp, weight); _histXiLambda->fill(xi, weight); _weightedTotalNumLambda += weight; break; case 3324: _histXpXi1530->fill(xp, weight); _histXiXi1530->fill(xi, weight); _weightedTotalNumXi1530 += weight; break; case 3124: _histXpLambda1520->fill(xp, weight); _histXiLambda1520->fill(xi, weight); _weightedTotalNumLambda1520 += weight; break; } } } /// Finalize void finalize() { normalize(_histXpLambda , _weightedTotalNumLambda /sumOfWeights()); normalize(_histXiLambda , _weightedTotalNumLambda /sumOfWeights()); normalize(_histXpXiMinus , _weightedTotalNumXiMinus /sumOfWeights()); normalize(_histXiXiMinus , _weightedTotalNumXiMinus /sumOfWeights()); normalize(_histXpSigma1385Plus , _weightedTotalNumSigma1385Plus/sumOfWeights()); normalize(_histXiSigma1385Plus , _weightedTotalNumSigma1385Plus/sumOfWeights()); normalize(_histXpSigma1385Minus, _weightedTotalNumSigma1385Plus/sumOfWeights()); normalize(_histXiSigma1385Minus, _weightedTotalNumSigma1385Plus/sumOfWeights()); normalize(_histXpXi1530 , _weightedTotalNumXi1530 /sumOfWeights()); normalize(_histXiXi1530 , _weightedTotalNumXi1530 /sumOfWeights()); normalize(_histXpLambda1520 , _weightedTotalNumLambda1520 /sumOfWeights()); normalize(_histXiLambda1520 , _weightedTotalNumLambda1520 /sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
double _weightedTotalNumLambda; double _weightedTotalNumXiMinus; double _weightedTotalNumSigma1385Plus; double _weightedTotalNumSigma1385Minus; double _weightedTotalNumXi1530; double _weightedTotalNumLambda1520; Histo1DPtr _histXpLambda ; Histo1DPtr _histXiLambda ; Histo1DPtr _histXpXiMinus ; Histo1DPtr _histXiXiMinus ; Histo1DPtr _histXpSigma1385Plus ; Histo1DPtr _histXiSigma1385Plus ; Histo1DPtr _histXpSigma1385Minus; Histo1DPtr _histXiSigma1385Minus; Histo1DPtr _histXpXi1530 ; Histo1DPtr _histXiXi1530 ; Histo1DPtr _histXpLambda1520 ; Histo1DPtr _histXiLambda1520 ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1997_S3396100); } diff --git a/analyses/pluginLEP/OPAL_1997_S3608263.cc b/analyses/pluginLEP/OPAL_1997_S3608263.cc --- a/analyses/pluginLEP/OPAL_1997_S3608263.cc +++ b/analyses/pluginLEP/OPAL_1997_S3608263.cc @@ -1,85 +1,85 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL K*0 fragmentation function paper /// @author Peter Richardson class OPAL_1997_S3608263 : public Analysis { public: /// Constructor OPAL_1997_S3608263() : Analysis("OPAL_1997_S3608263") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXeK0 = bookHisto1D( 1, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); if (id==313) { double xp = p.p3().mod()/meanBeamMom; _histXeK0->fill(xp, weight); } } } /// Finalize void finalize() { scale(_histXeK0, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXeK0; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1997_S3608263); } diff --git a/analyses/pluginLEP/OPAL_1998_S3702294.cc b/analyses/pluginLEP/OPAL_1998_S3702294.cc --- a/analyses/pluginLEP/OPAL_1998_S3702294.cc +++ b/analyses/pluginLEP/OPAL_1998_S3702294.cc @@ -1,99 +1,99 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL f0,f2 and phi fragmentation function paper /// @author Peter Richardson class OPAL_1998_S3702294 : public Analysis { public: /// Constructor OPAL_1998_S3702294() : Analysis("OPAL_1998_S3702294") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXpf0 = bookHisto1D( 2, 1, 1); _histXpf2 = bookHisto1D( 2, 1, 2); _histXpPhi = bookHisto1D( 2, 1, 3); } void analyze(const Event& e) { // First, veto on 
leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); double xp = p.p3().mod()/meanBeamMom; switch (id) { case 9010221: _histXpf0->fill(xp, weight); break; case 225: _histXpf2->fill(xp, weight); break; case 333: _histXpPhi->fill(xp, weight); break; } } } /// Finalize void finalize() { scale(_histXpf0 , 1./sumOfWeights()); scale(_histXpf2 , 1./sumOfWeights()); scale(_histXpPhi, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXpf0; Histo1DPtr _histXpf2; Histo1DPtr _histXpPhi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1998_S3702294); } diff --git a/analyses/pluginLEP/OPAL_1998_S3749908.cc b/analyses/pluginLEP/OPAL_1998_S3749908.cc --- a/analyses/pluginLEP/OPAL_1998_S3749908.cc +++ b/analyses/pluginLEP/OPAL_1998_S3749908.cc @@ -1,152 +1,152 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL photon/light meson paper /// @author Peter Richardson class 
OPAL_1998_S3749908 : public Analysis { public: /// Constructor OPAL_1998_S3749908() : Analysis("OPAL_1998_S3749908") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXePhoton = bookHisto1D( 2, 1, 1); _histXiPhoton = bookHisto1D( 3, 1, 1); _histXePi = bookHisto1D( 4, 1, 1); _histXiPi = bookHisto1D( 5, 1, 1); _histXeEta = bookHisto1D( 6, 1, 1); _histXiEta = bookHisto1D( 7, 1, 1); _histXeRho = bookHisto1D( 8, 1, 1); _histXiRho = bookHisto1D( 9, 1, 1); _histXeOmega = bookHisto1D(10, 1, 1); _histXiOmega = bookHisto1D(11, 1, 1); _histXeEtaPrime = bookHisto1D(12, 1, 1); _histXiEtaPrime = bookHisto1D(13, 1, 1); _histXeA0 = bookHisto1D(14, 1, 1); _histXiA0 = bookHisto1D(15, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); double xi = -log(p.p3().mod()/meanBeamMom); double xE = p.E()/meanBeamMom; switch (id) { case 22: // Photons _histXePhoton->fill(xE, weight); _histXiPhoton->fill(xi, weight); break; case 111: // Neutral pions _histXePi->fill(xE, weight); _histXiPi->fill(xi, weight); break; case 221: // eta _histXeEta->fill(xE, weight); _histXiEta->fill(xi, weight); break; case 213: // Charged rho (770) _histXeRho->fill(xE, weight); _histXiRho->fill(xi, weight); break; case 223: // omega (782) _histXeOmega->fill(xE, weight); _histXiOmega->fill(xi, weight); break; case 331: // eta' (958) _histXeEtaPrime->fill(xE, weight); _histXiEtaPrime->fill(xi, weight); break; case 9000211: // Charged a_0 (980) _histXeA0->fill(xE, weight); _histXiA0->fill(xi, weight); break; } } } /// Finalize void finalize() { scale(_histXePhoton , 1./sumOfWeights()); scale(_histXiPhoton , 1./sumOfWeights()); scale(_histXePi , 1./sumOfWeights()); scale(_histXiPi , 1./sumOfWeights()); scale(_histXeEta , 1./sumOfWeights()); scale(_histXiEta , 1./sumOfWeights()); scale(_histXeRho , 1./sumOfWeights()); scale(_histXiRho , 1./sumOfWeights()); scale(_histXeOmega , 1./sumOfWeights()); scale(_histXiOmega , 1./sumOfWeights()); scale(_histXeEtaPrime, 1./sumOfWeights()); scale(_histXiEtaPrime, 1./sumOfWeights()); scale(_histXeA0 , 1./sumOfWeights()); scale(_histXiA0 , 
1./sumOfWeights()); } //@} private: Histo1DPtr _histXePhoton ; Histo1DPtr _histXiPhoton ; Histo1DPtr _histXePi ; Histo1DPtr _histXiPi ; Histo1DPtr _histXeEta ; Histo1DPtr _histXiEta ; Histo1DPtr _histXeRho ; Histo1DPtr _histXiRho ; Histo1DPtr _histXeOmega ; Histo1DPtr _histXiOmega ; Histo1DPtr _histXeEtaPrime; Histo1DPtr _histXiEtaPrime; Histo1DPtr _histXeA0 ; Histo1DPtr _histXiA0 ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1998_S3749908); } diff --git a/analyses/pluginLEP/OPAL_2000_S4418603.cc b/analyses/pluginLEP/OPAL_2000_S4418603.cc --- a/analyses/pluginLEP/OPAL_2000_S4418603.cc +++ b/analyses/pluginLEP/OPAL_2000_S4418603.cc @@ -1,86 +1,86 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL K0 fragmentation function paper /// @author Peter Richardson class OPAL_2000_S4418603 : public Analysis { public: /// Constructor OPAL_2000_S4418603() : Analysis("OPAL_2000_S4418603") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histXeK0 = bookHisto1D( 3, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); if (id == PID::K0S || id == PID::K0L) { double xE = p.E()/meanBeamMom; _histXeK0->fill(xE, weight); } } } /// Finalize void finalize() { scale(_histXeK0, 1./sumOfWeights()); } //@} private: Histo1DPtr _histXeK0; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2000_S4418603); } diff --git a/analyses/pluginLEP/OPAL_2003_I599181.cc b/analyses/pluginLEP/OPAL_2003_I599181.cc --- a/analyses/pluginLEP/OPAL_2003_I599181.cc +++ b/analyses/pluginLEP/OPAL_2003_I599181.cc @@ -1,83 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief OPAL b-fragmentation measurement for weak B-hadron decays /// @author Simone Amoroso class OPAL_2003_I599181 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_2003_I599181); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(Beam(), "Beams"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _histXbweak = bookHisto1D(1, 1, 1); _histMeanXbweak = bookProfile1D(2, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // 
Get event weight for histo filling const double weight = event.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(event, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() +beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); // Get Bottom hadrons const Particles bhads = filter_select(ufs.particles(), isBottomHadron); for (const Particle& bhad : bhads) { // Check for weak decay, i.e. no more bottom present in children if (bhad.children(lastParticleWith(hasBottom)).empty()) { const double xp = bhad.E()/meanBeamMom; _histXbweak->fill(xp, weight); _histMeanXbweak->fill(_histMeanXbweak->bin(0).xMid(), xp, weight); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_histXbweak); } //@} private: Histo1DPtr _histXbweak; Profile1DPtr _histMeanXbweak; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2003_I599181); } diff --git a/analyses/pluginLEP/SLD_1999_S3743934.cc b/analyses/pluginLEP/SLD_1999_S3743934.cc --- a/analyses/pluginLEP/SLD_1999_S3743934.cc +++ b/analyses/pluginLEP/SLD_1999_S3743934.cc @@ -1,642 +1,642 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/InitialQuarks.hh" #include "Rivet/Projections/Thrust.hh" namespace Rivet { /// @brief SLD flavour-dependent fragmentation paper /// @author Peter Richardson class SLD_1999_S3743934 : public Analysis { public: /// Constructor SLD_1999_S3743934() : Analysis("SLD_1999_S3743934"), _SumOfudsWeights(0.), _SumOfcWeights(0.), _SumOfbWeights(0.), _multPiPlus(4,0.),_multKPlus(4,0.),_multK0(4,0.), _multKStar0(4,0.),_multPhi(4,0.), 
_multProton(4,0.),_multLambda(4,0.) { } /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); // Get event weight for histo filling const double weight = e.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); int flavour = 0; const InitialQuarks& iqf = apply(e, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. /// @todo Can we make this based on hadron flavour instead? 
Particles quarks; if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); quarks = iqf.particles(); } else { map quarkmap; foreach (const Particle& p, iqf.particles()) { if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p; else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p; } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { double energy(0.); if (quarkmap.find( i) != quarkmap.end()) energy += quarkmap[ i].E(); if (quarkmap.find(-i) != quarkmap.end()) energy += quarkmap[-i].E(); if (energy > maxenergy) flavour = i; } if (quarkmap.find(flavour) != quarkmap.end()) quarks.push_back(quarkmap[flavour]); if (quarkmap.find(-flavour) != quarkmap.end()) quarks.push_back(quarkmap[-flavour]); } switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _SumOfudsWeights += weight; break; case PID::CQUARK: _SumOfcWeights += weight; break; case PID::BQUARK: _SumOfbWeights += weight; break; } // thrust axis for projections Vector3 axis = apply(e, "Thrust").thrustAxis(); double dot(0.); if (!quarks.empty()) { dot = quarks[0].p3().dot(axis); if (quarks[0].pid() < 0) dot *= -1; } foreach (const Particle& p, fs.particles()) { const double xp = p.p3().mod()/meanBeamMom; // if in quark or antiquark hemisphere bool quark = p.p3().dot(axis)*dot > 0.; _h_XpChargedN->fill(xp, weight); _temp_XpChargedN1->fill(xp, weight); _temp_XpChargedN2->fill(xp, weight); _temp_XpChargedN3->fill(xp, weight); int id = p.abspid(); // charged pions if (id == PID::PIPLUS) { _h_XpPiPlusN->fill(xp, weight); _multPiPlus[0] += weight; switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multPiPlus[1] += weight; _h_XpPiPlusLight->fill(xp, weight); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RPiPlus->fill(xp, weight); else _h_RPiMinus->fill(xp, weight); break; case PID::CQUARK: _multPiPlus[2] += weight; _h_XpPiPlusCharm->fill(xp, weight); break; case PID::BQUARK: _multPiPlus[3] += weight; 
_h_XpPiPlusBottom->fill(xp, weight); break; } } else if (id == PID::KPLUS) { _h_XpKPlusN->fill(xp, weight); _multKPlus[0] += weight; switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multKPlus[1] += weight; _temp_XpKPlusLight->fill(xp, weight); _h_XpKPlusLight->fill(xp, weight); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RKPlus->fill(xp, weight); else _h_RKMinus->fill(xp, weight); break; break; case PID::CQUARK: _multKPlus[2] += weight; _h_XpKPlusCharm->fill(xp, weight); _temp_XpKPlusCharm->fill(xp, weight); break; case PID::BQUARK: _multKPlus[3] += weight; _h_XpKPlusBottom->fill(xp, weight); break; } } else if (id == PID::PROTON) { _h_XpProtonN->fill(xp, weight); _multProton[0] += weight; switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multProton[1] += weight; _temp_XpProtonLight->fill(xp, weight); _h_XpProtonLight->fill(xp, weight); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RProton->fill(xp, weight); else _h_RPBar ->fill(xp, weight); break; break; case PID::CQUARK: _multProton[2] += weight; _temp_XpProtonCharm->fill(xp, weight); _h_XpProtonCharm->fill(xp, weight); break; case PID::BQUARK: _multProton[3] += weight; _h_XpProtonBottom->fill(xp, weight); break; } } } - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { const double xp = p.p3().mod()/meanBeamMom; // if in quark or antiquark hemisphere bool quark = p.p3().dot(axis)*dot>0.; int id = p.abspid(); if (id == PID::LAMBDA) { _multLambda[0] += weight; _h_XpLambdaN->fill(xp, weight); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multLambda[1] += weight; _h_XpLambdaLight->fill(xp, weight); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RLambda->fill(xp, weight); else _h_RLBar ->fill(xp, weight); break; case PID::CQUARK: _multLambda[2] += weight; _h_XpLambdaCharm->fill(xp, weight); break; case 
PID::BQUARK: _multLambda[3] += weight; _h_XpLambdaBottom->fill(xp, weight); break; } } else if (id == 313) { _multKStar0[0] += weight; _h_XpKStar0N->fill(xp, weight); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multKStar0[1] += weight; _temp_XpKStar0Light->fill(xp, weight); _h_XpKStar0Light->fill(xp, weight); if ( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RKS0 ->fill(xp, weight); else _h_RKSBar0->fill(xp, weight); break; break; case PID::CQUARK: _multKStar0[2] += weight; _temp_XpKStar0Charm->fill(xp, weight); _h_XpKStar0Charm->fill(xp, weight); break; case PID::BQUARK: _multKStar0[3] += weight; _h_XpKStar0Bottom->fill(xp, weight); break; } } else if (id == 333) { _multPhi[0] += weight; _h_XpPhiN->fill(xp, weight); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multPhi[1] += weight; _h_XpPhiLight->fill(xp, weight); break; case PID::CQUARK: _multPhi[2] += weight; _h_XpPhiCharm->fill(xp, weight); break; case PID::BQUARK: _multPhi[3] += weight; _h_XpPhiBottom->fill(xp, weight); break; } } else if (id == PID::K0S || id == PID::K0L) { _multK0[0] += weight; _h_XpK0N->fill(xp, weight); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multK0[1] += weight; _h_XpK0Light->fill(xp, weight); break; case PID::CQUARK: _multK0[2] += weight; _h_XpK0Charm->fill(xp, weight); break; case PID::BQUARK: _multK0[3] += weight; _h_XpK0Bottom->fill(xp, weight); break; } } } } void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); declare(InitialQuarks(), "IQF"); declare(Thrust(FinalState()), "Thrust"); _temp_XpChargedN1 = bookHisto1D("TMP/XpChargedN1", refData( 1, 1, 1)); _temp_XpChargedN2 = bookHisto1D("TMP/XpChargedN2", refData( 2, 1, 1)); _temp_XpChargedN3 = bookHisto1D("TMP/XpChargedN3", refData( 3, 1, 1)); _h_XpPiPlusN = bookHisto1D( 1, 1, 2); _h_XpKPlusN = bookHisto1D( 2, 1, 2); 
_h_XpProtonN = bookHisto1D( 3, 1, 2); _h_XpChargedN = bookHisto1D( 4, 1, 1); _h_XpK0N = bookHisto1D( 5, 1, 1); _h_XpLambdaN = bookHisto1D( 7, 1, 1); _h_XpKStar0N = bookHisto1D( 8, 1, 1); _h_XpPhiN = bookHisto1D( 9, 1, 1); _h_XpPiPlusLight = bookHisto1D(10, 1, 1); _h_XpPiPlusCharm = bookHisto1D(10, 1, 2); _h_XpPiPlusBottom = bookHisto1D(10, 1, 3); _h_XpKPlusLight = bookHisto1D(12, 1, 1); _h_XpKPlusCharm = bookHisto1D(12, 1, 2); _h_XpKPlusBottom = bookHisto1D(12, 1, 3); _h_XpKStar0Light = bookHisto1D(14, 1, 1); _h_XpKStar0Charm = bookHisto1D(14, 1, 2); _h_XpKStar0Bottom = bookHisto1D(14, 1, 3); _h_XpProtonLight = bookHisto1D(16, 1, 1); _h_XpProtonCharm = bookHisto1D(16, 1, 2); _h_XpProtonBottom = bookHisto1D(16, 1, 3); _h_XpLambdaLight = bookHisto1D(18, 1, 1); _h_XpLambdaCharm = bookHisto1D(18, 1, 2); _h_XpLambdaBottom = bookHisto1D(18, 1, 3); _h_XpK0Light = bookHisto1D(20, 1, 1); _h_XpK0Charm = bookHisto1D(20, 1, 2); _h_XpK0Bottom = bookHisto1D(20, 1, 3); _h_XpPhiLight = bookHisto1D(22, 1, 1); _h_XpPhiCharm = bookHisto1D(22, 1, 2); _h_XpPhiBottom = bookHisto1D(22, 1, 3); _temp_XpKPlusCharm = bookHisto1D("TMP/XpKPlusCharm", refData(13, 1, 1)); _temp_XpKPlusLight = bookHisto1D("TMP/XpKPlusLight", refData(13, 1, 1)); _temp_XpKStar0Charm = bookHisto1D("TMP/XpKStar0Charm", refData(15, 1, 1)); _temp_XpKStar0Light = bookHisto1D("TMP/XpKStar0Light", refData(15, 1, 1)); _temp_XpProtonCharm = bookHisto1D("TMP/XpProtonCharm", refData(17, 1, 1)); _temp_XpProtonLight = bookHisto1D("TMP/XpProtonLight", refData(17, 1, 1)); _h_RPiPlus = bookHisto1D( 26, 1, 1); _h_RPiMinus = bookHisto1D( 26, 1, 2); _h_RKS0 = bookHisto1D( 28, 1, 1); _h_RKSBar0 = bookHisto1D( 28, 1, 2); _h_RKPlus = bookHisto1D( 30, 1, 1); _h_RKMinus = bookHisto1D( 30, 1, 2); _h_RProton = bookHisto1D( 32, 1, 1); _h_RPBar = bookHisto1D( 32, 1, 2); _h_RLambda = bookHisto1D( 34, 1, 1); _h_RLBar = bookHisto1D( 34, 1, 2); _s_Xp_PiPl_Ch = bookScatter2D(1, 1, 1); _s_Xp_KPl_Ch = bookScatter2D(2, 1, 1); _s_Xp_Pr_Ch = 
bookScatter2D(3, 1, 1); _s_Xp_PiPlCh_PiPlLi = bookScatter2D(11, 1, 1); _s_Xp_PiPlBo_PiPlLi = bookScatter2D(11, 1, 2); _s_Xp_KPlCh_KPlLi = bookScatter2D(13, 1, 1); _s_Xp_KPlBo_KPlLi = bookScatter2D(13, 1, 2); _s_Xp_KS0Ch_KS0Li = bookScatter2D(15, 1, 1); _s_Xp_KS0Bo_KS0Li = bookScatter2D(15, 1, 2); _s_Xp_PrCh_PrLi = bookScatter2D(17, 1, 1); _s_Xp_PrBo_PrLi = bookScatter2D(17, 1, 2); _s_Xp_LaCh_LaLi = bookScatter2D(19, 1, 1); _s_Xp_LaBo_LaLi = bookScatter2D(19, 1, 2); _s_Xp_K0Ch_K0Li = bookScatter2D(21, 1, 1); _s_Xp_K0Bo_K0Li = bookScatter2D(21, 1, 2); _s_Xp_PhiCh_PhiLi = bookScatter2D(23, 1, 1); _s_Xp_PhiBo_PhiLi = bookScatter2D(23, 1, 2); _s_PiM_PiP = bookScatter2D(27, 1, 1); _s_KSBar0_KS0 = bookScatter2D(29, 1, 1); _s_KM_KP = bookScatter2D(31, 1, 1); _s_Pr_PBar = bookScatter2D(33, 1, 1); _s_Lam_LBar = bookScatter2D(35, 1, 1); } /// Finalize void finalize() { // Get the ratio plots sorted out first divide(_h_XpPiPlusN, _temp_XpChargedN1, _s_Xp_PiPl_Ch); divide(_h_XpKPlusN, _temp_XpChargedN2, _s_Xp_KPl_Ch); divide(_h_XpProtonN, _temp_XpChargedN3, _s_Xp_Pr_Ch); divide(_h_XpPiPlusCharm, _h_XpPiPlusLight, _s_Xp_PiPlCh_PiPlLi); _s_Xp_PiPlCh_PiPlLi->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpPiPlusBottom, _h_XpPiPlusLight, _s_Xp_PiPlBo_PiPlLi); _s_Xp_PiPlBo_PiPlLi->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_temp_XpKPlusCharm , _temp_XpKPlusLight, _s_Xp_KPlCh_KPlLi); _s_Xp_KPlCh_KPlLi->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpKPlusBottom, _h_XpKPlusLight, _s_Xp_KPlBo_KPlLi); _s_Xp_KPlBo_KPlLi->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_temp_XpKStar0Charm, _temp_XpKStar0Light, _s_Xp_KS0Ch_KS0Li); _s_Xp_KS0Ch_KS0Li->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpKStar0Bottom, _h_XpKStar0Light, _s_Xp_KS0Bo_KS0Li); _s_Xp_KS0Bo_KS0Li->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_temp_XpProtonCharm, _temp_XpProtonLight, _s_Xp_PrCh_PrLi); _s_Xp_PrCh_PrLi->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpProtonBottom, 
_h_XpProtonLight, _s_Xp_PrBo_PrLi); _s_Xp_PrBo_PrLi->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_h_XpLambdaCharm, _h_XpLambdaLight, _s_Xp_LaCh_LaLi); _s_Xp_LaCh_LaLi->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpLambdaBottom, _h_XpLambdaLight, _s_Xp_LaBo_LaLi); _s_Xp_LaBo_LaLi->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_h_XpK0Charm, _h_XpK0Light, _s_Xp_K0Ch_K0Li); _s_Xp_K0Ch_K0Li->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpK0Bottom, _h_XpK0Light, _s_Xp_K0Bo_K0Li); _s_Xp_K0Bo_K0Li->scale(1.,_SumOfudsWeights/_SumOfbWeights); divide(_h_XpPhiCharm, _h_XpPhiLight, _s_Xp_PhiCh_PhiLi); _s_Xp_PhiCh_PhiLi->scale(1.,_SumOfudsWeights/_SumOfcWeights); divide(_h_XpPhiBottom, _h_XpPhiLight, _s_Xp_PhiBo_PhiLi); _s_Xp_PhiBo_PhiLi->scale(1.,_SumOfudsWeights/_SumOfbWeights); // Then the leading particles divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP); divide(*_h_RKSBar0 - *_h_RKS0, *_h_RKSBar0 + *_h_RKS0, _s_KSBar0_KS0); divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP); divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar); divide(*_h_RLambda - *_h_RLBar, *_h_RLambda + *_h_RLBar, _s_Lam_LBar); // Then the rest scale(_h_XpPiPlusN, 1/sumOfWeights()); scale(_h_XpKPlusN, 1/sumOfWeights()); scale(_h_XpProtonN, 1/sumOfWeights()); scale(_h_XpChargedN, 1/sumOfWeights()); scale(_h_XpK0N, 1/sumOfWeights()); scale(_h_XpLambdaN, 1/sumOfWeights()); scale(_h_XpKStar0N, 1/sumOfWeights()); scale(_h_XpPhiN, 1/sumOfWeights()); scale(_h_XpPiPlusLight, 1/_SumOfudsWeights); scale(_h_XpPiPlusCharm, 1/_SumOfcWeights); scale(_h_XpPiPlusBottom, 1/_SumOfbWeights); scale(_h_XpKPlusLight, 1/_SumOfudsWeights); scale(_h_XpKPlusCharm, 1/_SumOfcWeights); scale(_h_XpKPlusBottom, 1/_SumOfbWeights); scale(_h_XpKStar0Light, 1/_SumOfudsWeights); scale(_h_XpKStar0Charm, 1/_SumOfcWeights); scale(_h_XpKStar0Bottom, 1/_SumOfbWeights); scale(_h_XpProtonLight, 1/_SumOfudsWeights); scale(_h_XpProtonCharm, 1/_SumOfcWeights); 
scale(_h_XpProtonBottom, 1/_SumOfbWeights); scale(_h_XpLambdaLight, 1/_SumOfudsWeights); scale(_h_XpLambdaCharm, 1/_SumOfcWeights); scale(_h_XpLambdaBottom, 1/_SumOfbWeights); scale(_h_XpK0Light, 1/_SumOfudsWeights); scale(_h_XpK0Charm, 1/_SumOfcWeights); scale(_h_XpK0Bottom, 1/_SumOfbWeights); scale(_h_XpPhiLight, 1/_SumOfudsWeights); scale(_h_XpPhiCharm , 1/_SumOfcWeights); scale(_h_XpPhiBottom, 1/_SumOfbWeights); scale(_h_RPiPlus, 1/_SumOfudsWeights); scale(_h_RPiMinus, 1/_SumOfudsWeights); scale(_h_RKS0, 1/_SumOfudsWeights); scale(_h_RKSBar0, 1/_SumOfudsWeights); scale(_h_RKPlus, 1/_SumOfudsWeights); scale(_h_RKMinus, 1/_SumOfudsWeights); scale(_h_RProton, 1/_SumOfudsWeights); scale(_h_RPBar, 1/_SumOfudsWeights); scale(_h_RLambda, 1/_SumOfudsWeights); scale(_h_RLBar, 1/_SumOfudsWeights); // Multiplicities double avgNumPartsAll, avgNumPartsLight,avgNumPartsCharm, avgNumPartsBottom; // pi+/- // all avgNumPartsAll = _multPiPlus[0]/sumOfWeights(); bookScatter2D(24, 1, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multPiPlus[1]/_SumOfudsWeights; bookScatter2D(24, 1, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multPiPlus[2]/_SumOfcWeights; bookScatter2D(24, 1, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multPiPlus[3]/_SumOfbWeights; bookScatter2D(24, 1, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 1, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 1, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K+/- // all avgNumPartsAll = _multKPlus[0]/sumOfWeights(); bookScatter2D(24, 2, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multKPlus[1]/_SumOfudsWeights; bookScatter2D(24, 2, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multKPlus[2]/_SumOfcWeights; bookScatter2D(24, 2, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = 
_multKPlus[3]/_SumOfbWeights; bookScatter2D(24, 2, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 2, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 2, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K0 // all avgNumPartsAll = _multK0[0]/sumOfWeights(); bookScatter2D(24, 3, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multK0[1]/_SumOfudsWeights; bookScatter2D(24, 3, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multK0[2]/_SumOfcWeights; bookScatter2D(24, 3, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multK0[3]/_SumOfbWeights; bookScatter2D(24, 3, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 3, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 3, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K*0 // all avgNumPartsAll = _multKStar0[0]/sumOfWeights(); bookScatter2D(24, 4, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multKStar0[1]/_SumOfudsWeights; bookScatter2D(24, 4, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multKStar0[2]/_SumOfcWeights; bookScatter2D(24, 4, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multKStar0[3]/_SumOfbWeights; bookScatter2D(24, 4, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 4, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 4, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // phi // all avgNumPartsAll = _multPhi[0]/sumOfWeights(); bookScatter2D(24, 5, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multPhi[1]/_SumOfudsWeights; bookScatter2D(24, 5, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multPhi[2]/_SumOfcWeights; bookScatter2D(24, 5, 3, 
true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multPhi[3]/_SumOfbWeights; bookScatter2D(24, 5, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 5, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 5, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // p // all avgNumPartsAll = _multProton[0]/sumOfWeights(); bookScatter2D(24, 6, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multProton[1]/_SumOfudsWeights; bookScatter2D(24, 6, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multProton[2]/_SumOfcWeights; bookScatter2D(24, 6, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multProton[3]/_SumOfbWeights; bookScatter2D(24, 6, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 6, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 6, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // Lambda // all avgNumPartsAll = _multLambda[0]/sumOfWeights(); bookScatter2D(24, 7, 1, true)->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = _multLambda[1]/_SumOfudsWeights; bookScatter2D(24, 7, 2, true)->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = _multLambda[2]/_SumOfcWeights; bookScatter2D(24, 7, 3, true)->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = _multLambda[3]/_SumOfbWeights; bookScatter2D(24, 7, 4, true)->point(0).setY(avgNumPartsBottom); // charm-light bookScatter2D(25, 7, 1, true)->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light bookScatter2D(25, 7, 2, true)->point(0).setY(avgNumPartsBottom - avgNumPartsLight); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles. Used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
double _SumOfudsWeights, _SumOfcWeights, _SumOfbWeights; vector _multPiPlus, _multKPlus, _multK0, _multKStar0, _multPhi, _multProton, _multLambda; Histo1DPtr _h_XpPiPlusSig, _h_XpPiPlusN; Histo1DPtr _h_XpKPlusSig, _h_XpKPlusN; Histo1DPtr _h_XpProtonSig, _h_XpProtonN; Histo1DPtr _h_XpChargedN; Histo1DPtr _h_XpK0N, _h_XpLambdaN; Histo1DPtr _h_XpKStar0N, _h_XpPhiN; Histo1DPtr _h_XpPiPlusLight, _h_XpPiPlusCharm, _h_XpPiPlusBottom; Histo1DPtr _h_XpKPlusLight, _h_XpKPlusCharm, _h_XpKPlusBottom; Histo1DPtr _h_XpKStar0Light, _h_XpKStar0Charm, _h_XpKStar0Bottom; Histo1DPtr _h_XpProtonLight, _h_XpProtonCharm, _h_XpProtonBottom; Histo1DPtr _h_XpLambdaLight, _h_XpLambdaCharm, _h_XpLambdaBottom; Histo1DPtr _h_XpK0Light, _h_XpK0Charm, _h_XpK0Bottom; Histo1DPtr _h_XpPhiLight, _h_XpPhiCharm, _h_XpPhiBottom; Histo1DPtr _temp_XpChargedN1, _temp_XpChargedN2, _temp_XpChargedN3; Histo1DPtr _temp_XpKPlusCharm , _temp_XpKPlusLight; Histo1DPtr _temp_XpKStar0Charm, _temp_XpKStar0Light; Histo1DPtr _temp_XpProtonCharm, _temp_XpProtonLight; Histo1DPtr _h_RPiPlus, _h_RPiMinus; Histo1DPtr _h_RKS0, _h_RKSBar0; Histo1DPtr _h_RKPlus, _h_RKMinus; Histo1DPtr _h_RProton, _h_RPBar; Histo1DPtr _h_RLambda, _h_RLBar; Scatter2DPtr _s_Xp_PiPl_Ch, _s_Xp_KPl_Ch, _s_Xp_Pr_Ch; Scatter2DPtr _s_Xp_PiPlCh_PiPlLi, _s_Xp_PiPlBo_PiPlLi; Scatter2DPtr _s_Xp_KPlCh_KPlLi, _s_Xp_KPlBo_KPlLi; Scatter2DPtr _s_Xp_KS0Ch_KS0Li, _s_Xp_KS0Bo_KS0Li; Scatter2DPtr _s_Xp_PrCh_PrLi, _s_Xp_PrBo_PrLi; Scatter2DPtr _s_Xp_LaCh_LaLi, _s_Xp_LaBo_LaLi; Scatter2DPtr _s_Xp_K0Ch_K0Li, _s_Xp_K0Bo_K0Li; Scatter2DPtr _s_Xp_PhiCh_PhiLi, _s_Xp_PhiBo_PhiLi; Scatter2DPtr _s_PiM_PiP, _s_KSBar0_KS0, _s_KM_KP, _s_Pr_PBar, _s_Lam_LBar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(SLD_1999_S3743934); } diff --git a/analyses/pluginLHCb/LHCB_2010_S8758301.cc b/analyses/pluginLHCb/LHCB_2010_S8758301.cc --- a/analyses/pluginLHCb/LHCB_2010_S8758301.cc +++ b/analyses/pluginLHCb/LHCB_2010_S8758301.cc @@ -1,341 +1,341 @@ // -*- C++ -*- #include 
"Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Math/Constants.hh" #include "Rivet/Math/Units.hh" #include "HepMC/GenEvent.h" #include "HepMC/GenParticle.h" #include "HepMC/GenVertex.h" #include "HepMC/SimpleVector.h" namespace Rivet { using namespace HepMC; using namespace std; // Lifetime cut: longest living ancestor ctau < 10^-11 [m] namespace { const double MAX_CTAU = 1.0E-11; // [m] const double MIN_PT = 0.0001; // [GeV/c] } class LHCB_2010_S8758301 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor LHCB_2010_S8758301() : Analysis("LHCB_2010_S8758301"), sumKs0_30(0.0), sumKs0_35(0.0), sumKs0_40(0.0), sumKs0_badnull(0), sumKs0_badlft(0), sumKs0_all(0), sumKs0_outup(0), sumKs0_outdwn(0), sum_low_pt_loss(0), sum_high_pt_loss(0) { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { MSG_DEBUG("Initializing analysis!"); fillMap(partLftMap); _h_K0s_pt_30 = bookHisto1D(1,1,1); _h_K0s_pt_35 = bookHisto1D(1,1,2); _h_K0s_pt_40 = bookHisto1D(1,1,3); _h_K0s_pt_y_30 = bookHisto1D(2,1,1); _h_K0s_pt_y_35 = bookHisto1D(2,1,2); _h_K0s_pt_y_40 = bookHisto1D(2,1,3); _h_K0s_pt_y_all = bookHisto1D(3,1,1); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); } /// Perform the per-event analysis void analyze(const Event& event) { int id; double y, pT; const double weight = event.weight(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); double ancestor_lftime; foreach (const Particle& p, ufs.particles()) { id = p.pid(); if ((id != 310) && (id != -310)) continue; sumKs0_all ++; ancestor_lftime = 0.; const GenParticle* long_ancestor = getLongestLivedAncestor(p, ancestor_lftime); if ( !(long_ancestor) ) { sumKs0_badnull ++; continue; } if ( ancestor_lftime > MAX_CTAU ) { sumKs0_badlft ++; MSG_DEBUG("Ancestor " << 
long_ancestor->pdg_id() << ", ctau: " << ancestor_lftime << " [m]"); continue; } const FourMomentum& qmom = p.momentum(); y = 0.5 * log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz())); pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py())); if (pT < MIN_PT) { sum_low_pt_loss ++; MSG_DEBUG("Small pT K^0_S: " << pT << " GeV/c."); } if (pT > 1.6) { sum_high_pt_loss ++; } if (y > 2.5 && y < 4.0) { _h_K0s_pt_y_all->fill(pT, weight); if (y > 2.5 && y < 3.0) { _h_K0s_pt_y_30->fill(pT, weight); _h_K0s_pt_30->fill(pT, weight); sumKs0_30 += weight; } else if (y > 3.0 && y < 3.5) { _h_K0s_pt_y_35->fill(pT, weight); _h_K0s_pt_35->fill(pT, weight); sumKs0_35 += weight; } else if (y > 3.5 && y < 4.0) { _h_K0s_pt_y_40->fill(pT, weight); _h_K0s_pt_40->fill(pT, weight); sumKs0_40 += weight; } } else if (y < 2.5) { sumKs0_outdwn ++; } else if (y > 4.0) { sumKs0_outup ++; } } } /// Normalise histograms etc., after the run void finalize() { MSG_DEBUG("Total number Ks0: " << sumKs0_all << endl << "Sum of weights: " << sumOfWeights() << endl << "Weight Ks0 (2.5 < y < 3.0): " << sumKs0_30 << endl << "Weight Ks0 (3.0 < y < 3.5): " << sumKs0_35 << endl << "Weight Ks0 (3.5 < y < 4.0): " << sumKs0_40 << endl << "Nb. unprompt Ks0 [null mother]: " << sumKs0_badnull << endl << "Nb. unprompt Ks0 [mother lifetime exceeded]: " << sumKs0_badlft << endl << "Nb. Ks0 (y > 4.0): " << sumKs0_outup << endl << "Nb. Ks0 (y < 2.5): " << sumKs0_outdwn << endl << "Nb. Ks0 (pT < " << (MIN_PT/MeV) << " MeV/c): " << sum_low_pt_loss << endl << "Nb. Ks0 (pT > 1.6 GeV/c): " << sum_high_pt_loss << endl << "Cross-section [mb]: " << crossSection()/millibarn << endl << "Nb. 
events: " << numEvents()); // Compute cross-section; multiply by bin width for correct scaling // cross-section given by Rivet in pb double xsection_factor = crossSection()/sumOfWeights(); // Multiply bin width for correct scaling, xsection in mub scale(_h_K0s_pt_30, 0.2*xsection_factor/microbarn); scale(_h_K0s_pt_35, 0.2*xsection_factor/microbarn); scale(_h_K0s_pt_40, 0.2*xsection_factor/microbarn); // Divide by dy (rapidity window width), xsection in mb scale(_h_K0s_pt_y_30, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_35, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_40, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_all, xsection_factor/1.5/millibarn); } //@} private: /// Get particle lifetime from hardcoded data double getLifeTime(int pid) { double lft = -1.0; if (pid < 0) pid = - pid; // Correct Pythia6 PIDs for f0(980), f0(1370) mesons if (pid == 10331) pid = 30221; if (pid == 10221) pid = 9010221; map::iterator pPartLft = partLftMap.find(pid); // search stable particle list if (pPartLft == partLftMap.end()) { if (pid <= 100 || pid == 990) return 0.0; for (unsigned int i=0; i < sizeof(stablePDGIds)/sizeof(unsigned int); i++ ) { if (pid == stablePDGIds[i]) { lft = 0.0; break; } } } else { lft = (*pPartLft).second; } if (lft < 0.0) MSG_ERROR("Could not determine lifetime for particle with PID " << pid << "... This K_s^0 will be considered unprompt!"); return lft; } const GenParticle* getLongestLivedAncestor(const Particle& p, double& lifeTime) { const GenParticle* ret = NULL; lifeTime = 1.; if (p.genParticle() == NULL) return NULL; const GenParticle* pmother = p.genParticle(); double longest_ctau = 0.; double mother_ctau; int mother_pid, n_inparts; const GenVertex* ivertex = pmother->production_vertex(); while (ivertex) { n_inparts = ivertex->particles_in_size(); if (n_inparts < 1) {ret = NULL; break;} // error: should never happen! 
const GenVertex::particles_in_const_iterator iPart_invtx = ivertex->particles_in_const_begin();
pmother = (*iPart_invtx); // first mother particle
mother_pid = pmother->pdg_id();
ivertex = pmother->production_vertex(); // get next vertex
// Protons and low-id (partonic/beam) ancestors carry no lifetime of their
// own: remember the first one seen as a fallback return value and keep walking.
if ( (mother_pid == 2212) || (mother_pid <= 100) ) {
  if (ret == NULL) ret = pmother;
  continue;
}
mother_ctau = getLifeTime(mother_pid);
if (mother_ctau < 0.) { ret= NULL; break; } // error: should never happen!
// Track the ancestor with the largest lifetime seen so far
if (mother_ctau > longest_ctau) { longest_ctau = mother_ctau; ret = pmother; }
}
// Convert the longest lifetime [s] to a decay length ctau [m] for the caller
if (ret) lifeTime = longest_ctau * c_light;
return ret;
}
// Fill the PDG Id to Lifetime[seconds] map
// Data was extracted from LHCb Particle Table using ParticleSvc
bool fillMap(map &m) {
  // NOTE(review): values are proper lifetimes in seconds; 1.E+16 is used as
  // an effective "stable" sentinel — confirm against the LHCb ParticleSvc dump.
  m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16;
  m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13;
  m[16] = 1.E+16; m[22] = 1.E+16; m[23] = 2.637914E-25;
  m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26;
  m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13;
  m[85] = 1.290893E-12; m[111] = 8.4E-17; m[113] = 4.405704E-24;
  m[115] = 6.151516E-24; m[117] = 4.088275E-24; m[119] = 2.102914E-24;
  m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08;
  m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24;
  m[219] = 2.102914E-24; m[221] = 5.063171E-19; m[223] = 7.752794E-23;
  m[225] = 3.555982E-24; m[227] = 3.91793E-24; m[229] = 2.777267E-24;
  m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24;
  m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08;
  m[323] = 1.295693E-23; m[325] = 6.682357E-24; m[327] = 4.139699E-24;
  m[329] = 3.324304E-24; m[331] = 3.210791E-21; m[333] = 1.545099E-22;
  m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12;
  m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23;
  m[421] = 4.101E-13; m[423] = 1.000003E-19; m[425] = 1.530726E-23;
  m[431] = 5.E-13; m[433] = 1.000003E-19; m[435] = 3.291061E-23;
  m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22;
m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23; m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12; m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13; m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24; m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24; m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24; m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24; m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24; m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24; m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24; m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24; m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24; m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24; m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10; m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23; m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22; m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14; m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19; m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19; m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19; m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12; m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12; m[10111] = 2.48382E-24; m[10113] = 
4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24; m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24; m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24; m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24; m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24; m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23; m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23; m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24; m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24; m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24; m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24; m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23; m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24; m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24; m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13312] = 4.135667E-22; m[13314] = 2.742551E-23; m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24; m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24; m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24; m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22; m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24; m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24; m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24; m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24; m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 
3.291061E-24; m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24; m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24; m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24; m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24; m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24; m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24; m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24; m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24; m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24; m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24; m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24; m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24; m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24; m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20; m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24; m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24; m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24; m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; m[9020221] = 8.093281E-23; m[9020443] = 1.061633E-23; m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24; m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24; m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21; return true; } /// @name Histograms //@{ Histo1DPtr _h_K0s_pt_y_30; // histogram for 2.5 < y < 3.0 (d2sigma) Histo1DPtr _h_K0s_pt_y_35; // histogram for 3.0 < y < 3.5 (d2sigma) Histo1DPtr _h_K0s_pt_y_40; // histogram for 3.5 < y < 4.0 (d2sigma) Histo1DPtr _h_K0s_pt_30; // histogram for 2.5 < y < 3.0 (sigma) Histo1DPtr _h_K0s_pt_35; // histogram for 3.0 < y < 3.5 
(sigma) Histo1DPtr _h_K0s_pt_40; // histogram for 3.5 < y < 4.0 (sigma) Histo1DPtr _h_K0s_pt_y_all; // histogram for 2.5 < y < 4.0 (d2sigma) double sumKs0_30; // Sum of weights 2.5 < y < 3.0 double sumKs0_35; // Sum of weights 3.0 < y < 3.5 double sumKs0_40; // Sum of weights 3.5 < y < 4.0 // Various counters mainly for debugging and comparisons between different generators size_t sumKs0_badnull; // Nb of particles for which mother could not be identified size_t sumKs0_badlft; // Nb of mesons with long lived mothers size_t sumKs0_all; // Nb of all Ks0 generated size_t sumKs0_outup; // Nb of mesons with y > 4.0 size_t sumKs0_outdwn; // Nb of mesons with y < 2.5 size_t sum_low_pt_loss; // Nb of mesons with very low pT (indicates when units are mixed-up) size_t sum_high_pt_loss; // Nb of mesons with pT > 1.6 GeV/c // Map between PDG id and particle lifetimes in seconds std::map partLftMap; // Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable) static const int stablePDGIds[205]; //@} }; // Actual initialization according to ISO C++ requirements const int LHCB_2010_S8758301::stablePDGIds[205] = { 311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303, 4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414, 4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324, 5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534, 5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112, 12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343, 30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321, 100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555, 120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 1000021, 1000022, 
1000023, 1000024, 1000025, 1000035, 1000037, 1000039, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013, 2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223, 3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001, 4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023, 9900024, 9900041, 9900042}; // Hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2010_S8758301); } diff --git a/analyses/pluginLHCb/LHCB_2011_I917009.cc b/analyses/pluginLHCb/LHCB_2011_I917009.cc --- a/analyses/pluginLHCb/LHCB_2011_I917009.cc +++ b/analyses/pluginLHCb/LHCB_2011_I917009.cc @@ -1,323 +1,323 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class LHCB_2011_I917009 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor LHCB_2011_I917009() : Analysis("LHCB_2011_I917009"), rap_beam(0.0), pt_min(0.0), pt1_edge(0.65), pt2_edge(1.0), pt3_edge(2.5), rap_min(2.), rap_max(0.0), dsShift(0) { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { int y_nbins = 4; fillMap(partLftMap); if (fuzzyEquals(sqrtS(), 0.9*TeV)) { rap_beam = 6.87; rap_max = 4.; pt_min = 0.25; } else if (fuzzyEquals(sqrtS(), 7*TeV)) { rap_beam = 8.92; rap_max = 4.5; pt_min = 0.15; y_nbins = 5; dsShift = 8; } else { MSG_ERROR("Incompatible beam energy!"); } // Create the sets of temporary histograms that will be used to make the ratios in the finalize() for (size_t i = 0; i < 12; ++i) _tmphistos[i] = YODA::Histo1D(y_nbins, rap_min, rap_max); for (size_t i = 12; i < 15; ++i) _tmphistos[i] = YODA::Histo1D(refData(dsShift+5, 1, 1)); for (size_t i = 15; i < 18; ++i) _tmphistos[i] = YODA::Histo1D(y_nbins, rap_beam - rap_max, rap_beam - rap_min); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), 
"UFS"); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); double ancestor_lftsum = 0.0; double y, pT; int id; int partIdx = -1; foreach (const Particle& p, ufs.particles()) { id = p.pid(); // continue if particle not a K0s nor (anti-)Lambda if ( (id == 310) || (id == -310) ) { partIdx = 2; } else if ( id == 3122 ) { partIdx = 1; } else if ( id == -3122 ) { partIdx = 0; } else { continue; } ancestor_lftsum = getMotherLifeTimeSum(p); // Lifetime cut: ctau sum of all particle ancestors < 10^-9 m according to the paper (see eq. 5) const double MAX_CTAU = 1.0E-9; // [m] if ( (ancestor_lftsum < 0.0) || (ancestor_lftsum > MAX_CTAU) ) continue; const FourMomentum& qmom = p.momentum(); y = log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz()))/2.; // skip this particle if it has too high or too low rapidity (extremely rare cases when E = +- pz) if ( std::isnan(y) || std::isinf(y) ) continue; y = fabs(y); if (!inRange(y, rap_min, rap_max)) continue; pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py())); if (!inRange(pT, pt_min, pt3_edge)) continue; // Filling corresponding temporary histograms for pT intervals if (inRange(pT, pt_min, pt1_edge)) _tmphistos[partIdx*3].fill(y, weight); if (inRange(pT, pt1_edge, pt2_edge)) _tmphistos[partIdx*3+1].fill(y, weight); if (inRange(pT, pt2_edge, pt3_edge)) _tmphistos[partIdx*3+2].fill(y, weight); // Fill histo in rapidity for whole pT interval _tmphistos[partIdx+9].fill(y, weight); // Fill histo in pT for whole rapidity interval _tmphistos[partIdx+12].fill(pT, weight); // Fill histo in rapidity loss for whole pT interval _tmphistos[partIdx+15].fill(rap_beam - y, weight); } } // Generate the ratio histograms void finalize() { int dsId = dsShift + 1; for (size_t j = 0; j < 3; ++j) { /// @todo Compactify to two one-liners Scatter2DPtr s1 = bookScatter2D(dsId, 1, j+1); 
divide(_tmphistos[j], _tmphistos[3+j], s1);
Scatter2DPtr s2 = bookScatter2D(dsId+1, 1, j+1);
divide(_tmphistos[j], _tmphistos[6+j], s2);
}
dsId += 2;
// NOTE(review): with j = 3..5, 3*j indexes helper histograms 9..17, i.e. the
// per-particle whole-pT (y), pT and y_loss histograms — confirm against the
// _tmphistos layout described in the class's helper-histogram comment block.
for (size_t j = 3; j < 6; ++j) {
  /// @todo Compactify to two one-liners
  Scatter2DPtr s1 = bookScatter2D(dsId, 1, 1);
  divide(_tmphistos[3*j], _tmphistos[3*j+1], s1);
  dsId += 1;
  Scatter2DPtr s2 = bookScatter2D(dsId, 1, 1);
  divide(_tmphistos[3*j], _tmphistos[3*j+2], s2);
  dsId += 1;
}
}
//@}
private:
// Get particle lifetime from hardcoded data.
// Returns the lifetime in seconds for the given PDG id, 0.0 for ids treated
// as stable (id <= 100 or listed in stablePDGIds), and -1.0 when unknown.
double getLifeTime(int pid) {
  double lft = -1.0;
  if (pid < 0) pid = - pid; // lifetime table is charge-symmetric: look up |pid|
  // Correct Pythia6 PIDs for f0(980), f0(1370) mesons
  if (pid == 10331) pid = 30221;
  if (pid == 10221) pid = 9010221;
  map::iterator pPartLft = partLftMap.find(pid);
  // search stable particle list
  if (pPartLft == partLftMap.end()) {
    if (pid <= 100) return 0.0;
    for (size_t i=0; i < sizeof(stablePDGIds)/sizeof(unsigned int); i++) {
      if (pid == stablePDGIds[i]) { lft = 0.0; break; }
    }
  } else {
    lft = (*pPartLft).second;
  }
  // A hadron missing from both tables means the prompt/unprompt decision
  // cannot be made reliably; the caller treats -1 as "unprompt".
  if (lft < 0.0 && PID::isHadron(pid)) {
    MSG_ERROR("Could not determine lifetime for particle with PID " << pid
              << "... This V^0 will be considered unprompt!");
  }
  return lft;
}
// Data members like post-cuts event weight counters go here
// Walk the ancestry of p up to the beam proton, summing ancestor lifetimes;
// the sum is returned scaled by c_light (i.e. as a ctau sum), or negative
// when the ancestry is broken and the particle must be treated as unprompt.
const double getMotherLifeTimeSum(const Particle& p) {
  if (p.genParticle() == NULL) return -1.;
  double lftSum = 0.;
  double plft = 0.;
  const GenParticle* part = p.genParticle();
  const GenVertex* ivtx = part->production_vertex();
  while (ivtx) {
    if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; }; // error: vertex without mothers
    const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin();
    part = (*iPart_invtx); // first mother particle
    if ( !(part) ) { lftSum = -1.; break; };
    ivtx = part->production_vertex();
    if ( (part->pdg_id() == 2212) || !(ivtx) ) break; //reached beam
    plft = getLifeTime(part->pdg_id());
    if (plft < 0.)
{ lftSum = -1.; break; }; lftSum += plft; }; return (lftSum * c_light); } /// @name Private variables //@{ // The rapidity of the beam according to the selected beam energy double rap_beam; // The edges of the intervals of transverse momentum double pt_min, pt1_edge, pt2_edge, pt3_edge; // The limits of the rapidity window double rap_min; double rap_max; // Indicates which set of histograms will be output to yoda file (according to beam energy) int dsShift; // Map between PDG id and particle lifetimes in seconds std::map partLftMap; // Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable) static const int stablePDGIds[205]; //@} /// @name Helper histograms //@{ /// Histograms are defined in the following order: anti-Lambda, Lambda and K0s. /// First 3 suites of 3 histograms correspond to each particle in bins of y for the 3 pT intervals. (9 histos) /// Next 3 histograms contain the particles in y bins for the whole pT interval (3 histos) /// Next 3 histograms contain the particles in y_loss bins for the whole pT interval (3 histos) /// Last 3 histograms contain the particles in pT bins for the whole rapidity (y) interval (3 histos) YODA::Histo1D _tmphistos[18]; //@} // Fill the PDG Id to Lifetime[seconds] map // Data was extracted from LHCb Particle Table through LHCb::ParticlePropertySvc bool fillMap(map& m) { m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16; m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16; m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26; m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12; m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24; m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08; m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24; m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 
3.91793E-24; m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24; m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23; m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21; m[333] = 1.545099E-22; m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12; m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13; m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19; m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22; m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23; m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12; m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13; m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24; m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24; m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24; m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24; m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24; m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24; m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24; m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24; m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24; m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24; m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10; m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23; 
m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22; m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14; m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19; m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19; m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19; m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12; m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12; m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24; m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24; m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24; m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24; m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24; m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23; m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23; m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24; m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24; m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24; m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24; m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23; m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24; m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24; m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13314] = 2.742551E-23; m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 
1.567172E-24; m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24; m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24; m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22; m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24; m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24; m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24; m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24; m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24; m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24; m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24; m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24; m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24; m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24; m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24; m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24; m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24; m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24; m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24; m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24; m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24; m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24; m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20; m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24; m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24; m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24; m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; 
m[9020443] = 1.061633E-23; m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24; m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24; m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21; return true; } }; const int LHCB_2011_I917009::stablePDGIds[205] = { 311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303, 4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414, 4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324, 5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534, 5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112, 12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343, 30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321, 100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555, 120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013, 2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223, 3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001, 4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023, 9900024, 9900041, 9900042 }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2011_I917009); } diff --git a/analyses/pluginLHCb/LHCB_2011_I919315.cc b/analyses/pluginLHCb/LHCB_2011_I919315.cc --- a/analyses/pluginLHCb/LHCB_2011_I919315.cc +++ b/analyses/pluginLHCb/LHCB_2011_I919315.cc @@ -1,96 +1,96 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" -#include 
"Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { class LHCB_2011_I919315 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor LHCB_2011_I919315() : Analysis("LHCB_2011_I919315") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _h_Phi_pT_y.addHistogram( 2.44, 2.62, bookHisto1D(2, 1, 1)); _h_Phi_pT_y.addHistogram( 2.62, 2.80, bookHisto1D(2, 1, 2)); _h_Phi_pT_y.addHistogram( 2.80, 2.98, bookHisto1D(3, 1, 1)); _h_Phi_pT_y.addHistogram( 2.98, 3.16, bookHisto1D(3, 1, 2)); _h_Phi_pT_y.addHistogram( 3.16, 3.34, bookHisto1D(4, 1, 1)); _h_Phi_pT_y.addHistogram( 3.34, 3.52, bookHisto1D(4, 1, 2)); _h_Phi_pT_y.addHistogram( 3.52, 3.70, bookHisto1D(5, 1, 1)); _h_Phi_pT_y.addHistogram( 3.70, 3.88, bookHisto1D(5, 1, 2)); _h_Phi_pT_y.addHistogram( 3.88, 4.06, bookHisto1D(6, 1, 1)); _h_Phi_pT = bookHisto1D(7, 1, 1); _h_Phi_y = bookHisto1D(8, 1, 1); } /// Perform the per-event analysis void analyze (const Event& event) { const double weight = event.weight(); - const UnstableFinalState& ufs = apply (event, "UFS"); + const UnstableParticles& ufs = apply (event, "UFS"); foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); if (id == 333) { // id 333 = phi-meson double y = p.rapidity(); double pT = p.perp(); if (pT < 0.6*GeV || pT > 5.0*GeV || y < 2.44 || y > 4.06) { continue; } _h_Phi_y->fill (y, weight); _h_Phi_pT->fill (pT/MeV, weight); _h_Phi_pT_y.fill(y, pT/GeV, weight); } } } /// Normalise histograms etc., after the run void finalize() { double scale_factor = crossSectionPerEvent()/microbarn; scale (_h_Phi_y, scale_factor); scale (_h_Phi_pT, scale_factor); _h_Phi_pT_y.scale(scale_factor/1000., this); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Phi_y; Histo1DPtr _h_Phi_pT; BinnedHistogram _h_Phi_pT_y; //@} }; // 
The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2011_I919315); } //@} diff --git a/analyses/pluginLHCb/LHCB_2013_I1218996.cc b/analyses/pluginLHCb/LHCB_2013_I1218996.cc --- a/analyses/pluginLHCb/LHCB_2013_I1218996.cc +++ b/analyses/pluginLHCb/LHCB_2013_I1218996.cc @@ -1,138 +1,138 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// LHCb prompt charm hadron pT and rapidity spectra class LHCB_2013_I1218996 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor LHCB_2013_I1218996() : Analysis("LHCB_2013_I1218996") { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); /// Book histograms _h_pdg411_Dplus_pT_y.addHistogram( 2.0, 2.5, bookHisto1D(3, 1, 1)); _h_pdg411_Dplus_pT_y.addHistogram( 2.5, 3.0, bookHisto1D(3, 1, 2)); _h_pdg411_Dplus_pT_y.addHistogram( 3.0, 3.5, bookHisto1D(3, 1, 3)); _h_pdg411_Dplus_pT_y.addHistogram( 3.5, 4.0, bookHisto1D(3, 1, 4)); _h_pdg411_Dplus_pT_y.addHistogram( 4.0, 4.5, bookHisto1D(3, 1, 5)); _h_pdg421_Dzero_pT_y.addHistogram( 2.0, 2.5, bookHisto1D(2, 1, 1)); _h_pdg421_Dzero_pT_y.addHistogram( 2.5, 3.0, bookHisto1D(2, 1, 2)); _h_pdg421_Dzero_pT_y.addHistogram( 3.0, 3.5, bookHisto1D(2, 1, 3)); _h_pdg421_Dzero_pT_y.addHistogram( 3.5, 4.0, bookHisto1D(2, 1, 4)); _h_pdg421_Dzero_pT_y.addHistogram( 4.0, 4.5, bookHisto1D(2, 1, 5)); _h_pdg431_Dsplus_pT_y.addHistogram( 2.0, 2.5, bookHisto1D(5, 1, 1)); _h_pdg431_Dsplus_pT_y.addHistogram( 2.5, 3.0, bookHisto1D(5, 1, 2)); _h_pdg431_Dsplus_pT_y.addHistogram( 3.0, 3.5, bookHisto1D(5, 1, 3)); _h_pdg431_Dsplus_pT_y.addHistogram( 3.5, 4.0, bookHisto1D(5, 1, 4)); 
_h_pdg431_Dsplus_pT_y.addHistogram( 4.0, 4.5, bookHisto1D(5, 1, 5)); _h_pdg413_Dstarplus_pT_y.addHistogram( 2.0, 2.5, bookHisto1D(4, 1, 1)); _h_pdg413_Dstarplus_pT_y.addHistogram( 2.5, 3.0, bookHisto1D(4, 1, 2)); _h_pdg413_Dstarplus_pT_y.addHistogram( 3.0, 3.5, bookHisto1D(4, 1, 3)); _h_pdg413_Dstarplus_pT_y.addHistogram( 3.5, 4.0, bookHisto1D(4, 1, 4)); _h_pdg413_Dstarplus_pT_y.addHistogram( 4.0, 4.5, bookHisto1D(4, 1, 5)); _h_pdg4122_Lambdac_pT = bookHisto1D(1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); /// @todo Use PrimaryHadrons to avoid double counting and automatically remove the contributions from unstable? - const UnstableFinalState &ufs = apply (event, "UFS"); + const UnstableParticles &ufs = apply (event, "UFS"); foreach (const Particle& p, ufs.particles() ) { // We're only interested in charm hadrons if (!p.isHadron() || !p.hasCharm()) continue; // Kinematic acceptance const double y = p.absrap(); ///< Double analysis efficiency with a "two-sided LHCb" const double pT = p.pT(); // Fiducial acceptance of the measurements if (pT > 8.0*GeV || y < 2.0 || y > 4.5) continue; /// Experimental selection removes non-prompt charm hadrons: we ignore those from b decays if (p.fromBottom()) continue; switch (p.abspid()) { case 411: _h_pdg411_Dplus_pT_y.fill(y, pT/GeV, weight); break; case 421: _h_pdg421_Dzero_pT_y.fill(y, pT/GeV, weight); break; case 431: _h_pdg431_Dsplus_pT_y.fill(y, pT/GeV, weight); break; case 413: _h_pdg413_Dstarplus_pT_y.fill(y, pT/GeV, weight); break; case 4122: _h_pdg4122_Lambdac_pT->fill(pT/GeV, weight); break; } } } /// Normalise histograms etc., after the run void finalize() { const double scale_factor = 0.5 * crossSection()/microbarn / sumOfWeights(); /// Avoid the implicit division by the bin width in the BinnedHistogram::scale method. 
foreach (Histo1DPtr h, _h_pdg411_Dplus_pT_y.getHistograms()) h->scaleW(scale_factor); foreach (Histo1DPtr h, _h_pdg421_Dzero_pT_y.getHistograms()) h->scaleW(scale_factor); foreach (Histo1DPtr h, _h_pdg431_Dsplus_pT_y.getHistograms()) h->scaleW(scale_factor); foreach (Histo1DPtr h, _h_pdg413_Dstarplus_pT_y.getHistograms()) h->scaleW(scale_factor); _h_pdg4122_Lambdac_pT->scaleW(scale_factor); } //@} private: /// @name Histograms //@{ BinnedHistogram _h_pdg411_Dplus_pT_y; BinnedHistogram _h_pdg421_Dzero_pT_y; BinnedHistogram _h_pdg431_Dsplus_pT_y; BinnedHistogram _h_pdg413_Dstarplus_pT_y; Histo1DPtr _h_pdg4122_Lambdac_pT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2013_I1218996); } diff --git a/analyses/pluginLHCf/LHCF_2012_I1115479.cc b/analyses/pluginLHCf/LHCF_2012_I1115479.cc --- a/analyses/pluginLHCf/LHCF_2012_I1115479.cc +++ b/analyses/pluginLHCf/LHCF_2012_I1115479.cc @@ -1,65 +1,65 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Tools/BinnedHistogram.hh" namespace Rivet { class LHCF_2012_I1115479 : public Analysis { public: LHCF_2012_I1115479() : Analysis("LHCF_2012_I1115479") { } public: void init() { - declare(UnstableFinalState(),"UFS"); + declare(UnstableParticles(),"UFS"); _binnedHistos_y_pT.addHistogram( 8.9, 9.0, bookHisto1D(1, 1, 1)); _binnedHistos_y_pT.addHistogram( 9.0, 9.2, bookHisto1D(2, 1, 1)); _binnedHistos_y_pT.addHistogram( 9.2, 9.4, bookHisto1D(3, 1, 1)); _binnedHistos_y_pT.addHistogram( 9.4, 9.6, bookHisto1D(4, 1, 1)); _binnedHistos_y_pT.addHistogram( 9.6, 10.0, bookHisto1D(5, 1, 1)); _binnedHistos_y_pT.addHistogram(10.0, 11.0, bookHisto1D(6, 1, 1)); } void analyze(const Event& event) { - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); const double weight = event.weight(); const double dphi = TWOPI; foreach (const Particle& p, ufs.particles()) { 
if (p.pid() == 111) { double pT = p.pT(); double y = p.rapidity(); if (pT > 0.6*GeV) continue; const double scaled_weight = weight/(dphi*pT/GeV); _binnedHistos_y_pT.fill(y, pT/GeV, scaled_weight); } } } void finalize() { _binnedHistos_y_pT.scale( 1./sumOfWeights() , this); } private: BinnedHistogram _binnedHistos_y_pT; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCF_2012_I1115479); } diff --git a/analyses/pluginLHCf/LHCF_2016_I1385877.cc b/analyses/pluginLHCf/LHCF_2016_I1385877.cc --- a/analyses/pluginLHCf/LHCF_2016_I1385877.cc +++ b/analyses/pluginLHCf/LHCF_2016_I1385877.cc @@ -1,230 +1,230 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Tools/BinnedHistogram.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class LHCF_2016_I1385877 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCF_2016_I1385877); //In case of some models there can be very small value pT but greater than 0. //In order to avoid unphysical behavior in the first bin a cutoff is needed //If you are sure the model does not have this problem you can set pt_cutoff to 0. 
const double pt_cutoff = 0.01; /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - addProjection(UnstableFinalState(), "UFS"); + addProjection(UnstableParticles(), "UFS"); addProjection(Beam(), "Beam"); // calculate beam rapidity const Particle bm1 = beams().first; const Particle bm2 = beams().second; _beam_rap_1 = bm1.rap(); _beam_rap_2 = bm2.rap(); MSG_INFO("Beam 1 : momentum " << bm1.pz() << " PID " << bm1.pid() << " rapidity " << bm1.rap() ); MSG_INFO("Beam 2 : momentum " << bm2.pz() << " PID " << bm2.pid() << " rapidity " << bm2.rap() ); const double _sqrts = sqrtS(); MSG_INFO("CM energy: " << _sqrts ); _beam_rap = _beam_rap_1; if(bm1.pid()==2212 && bm2.pid()==2212) { //p-p _pp_Pb = true; if( fuzzyEquals( _sqrts/GeV, 7000., 1E-3) ) { _p_pi0_rap_apT = bookProfile1D(1, 1, 2); _h_pi0_rap_pT.addHistogram( 8.8, 9.0, bookHisto1D(2, 1, 2)); _h_pi0_rap_pT.addHistogram( 9.0, 9.2, bookHisto1D(3, 1, 2)); _h_pi0_rap_pT.addHistogram( 9.2, 9.4, bookHisto1D(4, 1, 2)); _h_pi0_rap_pT.addHistogram( 9.4, 9.6, bookHisto1D(5, 1, 2)); _h_pi0_rap_pT.addHistogram( 9.6, 9.8, bookHisto1D(6, 1, 2)); _h_pi0_rap_pT.addHistogram( 9.8, 10.0, bookHisto1D(7, 1, 2)); _h_pi0_rap_pT.addHistogram( 10.0, 10.2, bookHisto1D(8, 1, 2)); _h_pi0_rap_pT.addHistogram( 10.2, 10.4, bookHisto1D(9, 1, 2)); _h_pi0_rap_pT.addHistogram( 10.4, 10.6, bookHisto1D(10, 1, 2)); _h_pi0_rap_pT.addHistogram( 10.6, 10.8, bookHisto1D(11, 1, 2)); _h_pi0_pT_pZ.addHistogram( 0.0, 0.2, bookHisto1D(12, 1, 2)); _h_pi0_pT_pZ.addHistogram( 0.2, 0.4, bookHisto1D(13, 1, 2)); _h_pi0_pT_pZ.addHistogram( 0.4, 0.6, bookHisto1D(14, 1, 2)); _h_pi0_pT_pZ.addHistogram( 0.6, 0.8, bookHisto1D(15, 1, 2)); _h_pi0_pT_pZ.addHistogram( 0.8, 1.0, bookHisto1D(16, 1, 2)); _h_pi0_rap = bookHisto1D(21, 1, 2); _p_pi0_raploss_apT = bookProfile1D(22, 1, 2); _h_pi0_raploss = bookHisto1D(23, 1, 2); } else if(fuzzyEquals( _sqrts/GeV, 2760., 1E-3) ){ 
_p_pi0_rap_apT = bookProfile1D(1, 1, 1); _h_pi0_rap_pT.addHistogram( 8.8, 9.0, bookHisto1D(2, 1, 1)); _h_pi0_rap_pT.addHistogram( 9.0, 9.2, bookHisto1D(3, 1, 1)); _h_pi0_rap_pT.addHistogram( 9.2, 9.4, bookHisto1D(4, 1, 1)); _h_pi0_rap_pT.addHistogram( 9.4, 9.6, bookHisto1D(5, 1, 1)); _h_pi0_rap_pT.addHistogram( 9.6, 9.8, bookHisto1D(6, 1, 1)); _h_pi0_pT_pZ.addHistogram( 0.0, 0.2, bookHisto1D(12, 1, 1)); _h_pi0_pT_pZ.addHistogram( 0.2, 0.4, bookHisto1D(13, 1, 1)); _h_pi0_rap = bookHisto1D(21, 1, 1); _p_pi0_raploss_apT = bookProfile1D(22, 1, 1); _h_pi0_raploss = bookHisto1D(23, 1, 1); }else{ MSG_INFO("p-p collisions : energy out of range!"); } } else if (bm1.pid()==2212 && bm2.pid()==1000822080){ //p-Pb _pp_Pb = false; if( fuzzyEquals( _sqrts/sqrt(208.)/GeV, 5020., 1E-3) ) { _p_pi0_rap_apT = bookProfile1D(1, 1, 3); _h_pi0_rap_pT.addHistogram( 8.8, 9.0, bookHisto1D(2, 1, 3)); _h_pi0_rap_pT.addHistogram( 9.0, 9.2, bookHisto1D(3, 1, 3)); _h_pi0_rap_pT.addHistogram( 9.2, 9.4, bookHisto1D(4, 1, 3)); _h_pi0_rap_pT.addHistogram( 9.4, 9.6, bookHisto1D(5, 1, 3)); _h_pi0_rap_pT.addHistogram( 9.6, 9.8, bookHisto1D(6, 1, 3)); _h_pi0_rap_pT.addHistogram( 9.8, 10.0, bookHisto1D(7, 1, 3)); _h_pi0_rap_pT.addHistogram( 10.0, 10.2, bookHisto1D(8, 1, 3)); _h_pi0_rap_pT.addHistogram( 10.2, 10.4, bookHisto1D(9, 1, 3)); _h_pi0_rap_pT.addHistogram( 10.4, 10.6, bookHisto1D(10, 1, 3)); _h_pi0_rap_pT.addHistogram( 10.6, 10.8, bookHisto1D(11, 1, 3)); _h_pi0_pT_pZ.addHistogram( 0.0, 0.2, bookHisto1D(12, 1, 3)); _h_pi0_pT_pZ.addHistogram( 0.2, 0.4, bookHisto1D(13, 1, 3)); _h_pi0_pT_pZ.addHistogram( 0.4, 0.6, bookHisto1D(14, 1, 3)); _h_pi0_pT_pZ.addHistogram( 0.6, 0.8, bookHisto1D(15, 1, 3)); _h_pi0_pT_pZ.addHistogram( 0.8, 1.0, bookHisto1D(16, 1, 3)); //_h_pi0_rap = bookHisto1D(21, 1, 3); _p_pi0_raploss_apT = bookProfile1D(22, 1, 3); //_h_pi0_raploss = bookHisto1D(23, 1, 3); }else{ MSG_INFO("p-Pb collisions : energy out of range!"); } } else { MSG_INFO("Beam PDGID out of range!"); } _nevt = 0.; 
} /// Perform the per-event analysis void analyze(const Event& event) { _nevt = _nevt + 1.; - const UnstableFinalState &ufs = applyProjection<UnstableFinalState>(event, "UFS"); + const UnstableParticles &ufs = applyProjection<UnstableParticles>(event, "UFS"); Particles ufs_particles = ufs.particles(); for (Particle& p: ufs_particles ) { // select neutral pion if(p.abspid() != 111) continue; if(p.pz()/GeV<0.) continue; if(p.pT()/GeV<pt_cutoff) continue; if(_pp_Pb) { //pp collisions const double pZ = p.pz()/GeV; const double pT = p.pT()/GeV; const double pT_MeV = p.pT()/MeV; const double en = p.E()/GeV; const double rap = p.rap(); const double raploss = _beam_rap_1 - p.rap(); //mitsuka-like _p_pi0_rap_apT->fill( rap , pT_MeV , 1.0 ); _h_pi0_rap_pT.fill( rap, pT , 1.0 / pT ); _h_pi0_pT_pZ.fill( pT, pZ , en / pT); _h_pi0_rap->fill( rap, 1.0 ); _p_pi0_raploss_apT->fill( raploss , pT_MeV , 1.0 ); _h_pi0_raploss->fill( raploss, 1.0 ); } else {//pPb collisions const double pZ = p.pz()/GeV; const double pT = p.pT()/GeV; const double pT_MeV = p.pT()/MeV; const double en = p.E()/GeV; const double rap = p.rap(); const double raploss = _beam_rap_1 - p.rap(); //mitsuka-like _p_pi0_rap_apT->fill( rap , pT_MeV , 1.0 ); _h_pi0_rap_pT.fill( rap, pT , 1.0 / pT ); _h_pi0_pT_pZ.fill( pT, pZ , en / pT); //_h_pi0_rap->fill( rap, 1.0 ); _p_pi0_raploss_apT->fill( raploss , pT_MeV , 1.0 ); //_h_pi0_raploss->fill( raploss, 1.0 ); } } } /// Normalise histograms etc., after the run void finalize() { const double inv_scale_factor = 1. / _nevt / (2.*PI); const double pt_bin_width = 0.2; for (Histo1DPtr h: _h_pi0_pT_pZ.getHistograms()){ if(h->path() == "/LHCF_2016_I1385877/d12-x01-y01" || h->path() == "/LHCF_2016_I1385877/d12-x01-y02" || h->path() == "/LHCF_2016_I1385877/d12-x01-y03") h->scaleW( inv_scale_factor / (pt_bin_width-pt_cutoff) ); else h->scaleW( inv_scale_factor / pt_bin_width ); } const double scale_factor = 1.
/ _nevt / (2.*PI); const double rap_bin_width = 0.2; for (Histo1DPtr h: _h_pi0_rap_pT.getHistograms()) { const int cutoff_bin = h->binIndexAt(pt_cutoff); if(cutoff_bin>=0) { // for(unsigned int ibin=0; ibin<h->numBins(); ++ibin) // cout << ibin << " " << h->bin(ibin).area() << endl; const double cutoff_wdt = h->bin(cutoff_bin).xMax()-h->bin(cutoff_bin).xMin(); h->bin(cutoff_bin).scaleW((cutoff_wdt)/(cutoff_wdt-pt_cutoff)); // for(unsigned int ibin=0; ibin<h->numBins(); ++ibin) // cout << ibin << " " << h->bin(ibin).area() << endl; } h->scaleW( scale_factor / rap_bin_width ); } if(_pp_Pb) { scale( _h_pi0_rap , 1. / _nevt ); scale( _h_pi0_raploss , 1. / _nevt ); } } //@} private: /// @name Histograms //@{ bool _pp_Pb; double _nevt; double _beam_rap; double _beam_rap_1; double _beam_rap_2; BinnedHistogram _h_pi0_pT_pZ; BinnedHistogram _h_pi0_rap_pT; Profile1DPtr _p_pi0_rap_apT; Histo1DPtr _h_pi0_rap; Profile1DPtr _p_pi0_raploss_apT; Histo1DPtr _h_pi0_raploss; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCF_2016_I1385877); } diff --git a/analyses/pluginMC/MC_HFJETS.cc b/analyses/pluginMC/MC_HFJETS.cc --- a/analyses/pluginMC/MC_HFJETS.cc +++ b/analyses/pluginMC/MC_HFJETS.cc @@ -1,151 +1,151 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/PrimaryHadrons.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { class MC_HFJETS : public Analysis { public: /// Constructor MC_HFJETS() : Analysis("MC_HFJETS") { } public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FastJets fj(FinalState(-5, 5), FastJets::ANTIKT, 0.6); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 5 && Cuts::pT > 500*MeV), "BCHadrons"); _h_ptCJetLead = bookHisto1D("ptCJetLead",
linspace(5, 0, 20, false) + logspace(25, 20, 200)); _h_ptCHadrLead = bookHisto1D("ptCHadrLead", linspace(5, 0, 10, false) + logspace(25, 10, 200)); _h_ptFracC = bookHisto1D("ptfracC", 50, 0, 1.5); _h_eFracC = bookHisto1D("efracC", 50, 0, 1.5); _h_ptBJetLead = bookHisto1D("ptBJetLead", linspace(5, 0, 20, false) + logspace(25, 20, 200)); _h_ptBHadrLead = bookHisto1D("ptBHadrLead", linspace(5, 0, 10, false) + logspace(25, 10, 200)); _h_ptFracB = bookHisto1D("ptfracB", 50, 0, 1.5); _h_eFracB = bookHisto1D("efracB", 50, 0, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // Get jets and heavy hadrons const Jets& jets = apply<FastJets>(event, "Jets").jetsByPt(); const Particles bhadrons = sortByPt(apply<HeavyHadrons>(event, "BCHadrons").bHadrons()); const Particles chadrons = sortByPt(apply<HeavyHadrons>(event, "BCHadrons").cHadrons()); MSG_DEBUG("# b hadrons = " << bhadrons.size() << ", # c hadrons = " << chadrons.size()); // Max HF hadron--jet axis dR to be regarded as a jet tag const double MAX_DR = 0.3; // Tag the leading b and c jets with a deltaR < 0.3 match // b-tagged jet are excluded from also being considered as c-tagged /// @todo Do this again with the ghost match?
MSG_DEBUG("Getting b/c-tags"); bool gotLeadingB = false, gotLeadingC = false;; foreach (const Jet& j, jets) { if (!gotLeadingB) { FourMomentum leadBJet, leadBHadr; double dRmin = MAX_DR; foreach (const Particle& b, bhadrons) { const double dRcand = min(dRmin, deltaR(j, b)); if (dRcand < dRmin) { dRmin = dRcand; leadBJet = j.momentum(); leadBHadr = b.momentum(); MSG_DEBUG("New closest b-hadron jet tag candidate: dR = " << dRmin << " for jet pT = " << j.pT()/GeV << " GeV, " << " b hadron pT = " << b.pT()/GeV << " GeV, PID = " << b.pid()); } } if (dRmin < MAX_DR) { // A jet has been tagged, so fill the histos and break the loop _h_ptBJetLead->fill(leadBJet.pT()/GeV, weight); _h_ptBHadrLead->fill(leadBHadr.pT()/GeV, weight); _h_ptFracB->fill(leadBHadr.pT() / leadBJet.pT(), weight); _h_eFracB->fill(leadBHadr.E() / leadBJet.E(), weight); gotLeadingB = true; continue; // escape this loop iteration so the same jet isn't c-tagged } } if (!gotLeadingC) { FourMomentum leadCJet, leadCHadr; double dRmin = MAX_DR; foreach (const Particle& c, chadrons) { const double dRcand = min(dRmin, deltaR(j, c)); if (dRcand < dRmin) { dRmin = dRcand; leadCJet = j.momentum(); leadCHadr = c.momentum(); MSG_DEBUG("New closest c-hadron jet tag candidate: dR = " << dRmin << " for jet pT = " << j.pT()/GeV << " GeV, " << " c hadron pT = " << c.pT()/GeV << " GeV, PID = " << c.pid()); } } if (dRmin < MAX_DR) { // A jet has been tagged, so fill the histos and break the loop _h_ptCJetLead->fill(leadCJet.pT()/GeV, weight); _h_ptCHadrLead->fill(leadCHadr.pT()/GeV, weight); _h_ptFracC->fill(leadCHadr.pT() / leadCJet.pT(), weight); _h_eFracC->fill(leadCHadr.E() / leadCJet.E(), weight); gotLeadingB = true; } } // If we've found both a leading b and a leading c jet, break the loop over jets if (gotLeadingB && gotLeadingC) break; } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_ptCJetLead); normalize(_h_ptCHadrLead); normalize(_h_ptFracC); normalize(_h_eFracC); 
normalize(_h_ptBJetLead); normalize(_h_ptBHadrLead); normalize(_h_ptFracB); normalize(_h_eFracB); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_ptCJetLead, _h_ptCHadrLead, _h_ptFracC, _h_eFracC; Histo1DPtr _h_ptBJetLead, _h_ptBHadrLead, _h_ptFracB, _h_eFracB; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_HFJETS); } diff --git a/analyses/pluginMC/MC_IDENTIFIED.cc b/analyses/pluginMC/MC_IDENTIFIED.cc --- a/analyses/pluginMC/MC_IDENTIFIED.cc +++ b/analyses/pluginMC/MC_IDENTIFIED.cc @@ -1,104 +1,104 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// Generic analysis looking at various distributions of final state particles /// @todo Rename as MC_HADRONS class MC_IDENTIFIED : public Analysis { public: /// Constructor MC_IDENTIFIED() : Analysis("MC_IDENTIFIED") { } public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Projections const FinalState cnfs(Cuts::abseta < 5.0 && Cuts::pT > 500*MeV); declare(cnfs, "FS"); - declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 500*MeV), "UFS"); + declare(UnstableParticles(Cuts::abseta < 5.0 && Cuts::pT > 500*MeV), "UFS"); // Histograms // @todo Choose E/pT ranged based on input energies... can't do anything about kin. 
cuts, though _histStablePIDs = bookHisto1D("MultsStablePIDs", 3335, -0.5, 3334.5); _histDecayedPIDs = bookHisto1D("MultsDecayedPIDs", 3335, -0.5, 3334.5); _histAllPIDs = bookHisto1D("MultsAllPIDs", 3335, -0.5, 3334.5); _histEtaPi = bookHisto1D("EtaPi", 25, 0, 5); _histEtaK = bookHisto1D("EtaK", 25, 0, 5); _histEtaLambda = bookHisto1D("EtaLambda", 25, 0, 5); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); // Unphysical (debug) plotting of all PIDs in the event, physical or otherwise foreach (const GenParticle* gp, particles(event.genEvent())) { _histAllPIDs->fill(abs(gp->pdg_id()), weight); } // Charged + neutral final state PIDs const FinalState& cnfs = apply(event, "FS"); foreach (const Particle& p, cnfs.particles()) { _histStablePIDs->fill(p.abspid(), weight); } // Unstable PIDs and identified particle eta spectra - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); foreach (const Particle& p, ufs.particles()) { _histDecayedPIDs->fill(p.pid(), weight); const double eta_abs = p.abseta(); const PdgId pid = p.abspid(); //if (PID::isMeson(pid) && PID::hasStrange()) { if (pid == 211 || pid == 111) _histEtaPi->fill(eta_abs, weight); else if (pid == 321 || pid == 130 || pid == 310) _histEtaK->fill(eta_abs, weight); else if (pid == 3122) _histEtaLambda->fill(eta_abs, weight); } } /// Finalize void finalize() { scale(_histStablePIDs, 1/sumOfWeights()); scale(_histDecayedPIDs, 1/sumOfWeights()); scale(_histAllPIDs, 1/sumOfWeights()); scale(_histEtaPi, 1/sumOfWeights()); scale(_histEtaK, 1/sumOfWeights()); scale(_histEtaLambda, 1/sumOfWeights()); } //@} private: /// @name Histograms //@{ Histo1DPtr _histStablePIDs, _histDecayedPIDs, _histAllPIDs; Histo1DPtr _histEtaPi, _histEtaK, _histEtaLambda; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_IDENTIFIED); } diff --git a/analyses/pluginMC/MC_VH2BB.cc b/analyses/pluginMC/MC_VH2BB.cc --- 
a/analyses/pluginMC/MC_VH2BB.cc +++ b/analyses/pluginMC/MC_VH2BB.cc @@ -1,262 +1,262 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/WFinder.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Math/LorentzTrans.hh" namespace Rivet { class MC_VH2BB : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor MC_VH2BB() : Analysis("MC_VH2BB") { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; Cut cut = Cuts::abseta < 3.5 && Cuts::pT > 25*GeV; ZFinder zeefinder(fs, cut, PID::ELECTRON, 65*GeV, 115*GeV, 0.2); declare(zeefinder, "ZeeFinder"); ZFinder zmmfinder(fs, cut, PID::MUON, 65*GeV, 115*GeV, 0.2); declare(zmmfinder, "ZmmFinder"); WFinder wefinder(fs, cut, PID::ELECTRON, 60*GeV, 100*GeV, 25*GeV, 0.2); declare(wefinder, "WeFinder"); WFinder wmfinder(fs, cut, PID::MUON, 60*GeV, 100*GeV, 25*GeV, 0.2); declare(wmfinder, "WmFinder"); declare(fs, "FinalState"); declare(FastJets(fs, FastJets::ANTIKT, 0.4), "AntiKT04"); declare(FastJets(fs, FastJets::ANTIKT, 0.5), "AntiKT05"); declare(FastJets(fs, FastJets::ANTIKT, 0.6), "AntiKT06"); /// Book histograms _h_jet_bb_Delta_eta = bookHisto1D("jet_bb_Delta_eta", 50, 0, 4); _h_jet_bb_Delta_phi = bookHisto1D("jet_bb_Delta_phi", 50, 0, M_PI); _h_jet_bb_Delta_pT = bookHisto1D("jet_bb_Delta_pT", 50,0, 500); _h_jet_bb_Delta_R = bookHisto1D("jet_bb_Delta_R", 50, 0, 5); _h_jet_b_jet_eta = bookHisto1D("jet_b_jet_eta", 50, -4, 4); _h_jet_b_jet_multiplicity = bookHisto1D("jet_b_jet_multiplicity", 11, -0.5, 10.5); _h_jet_b_jet_phi = bookHisto1D("jet_b_jet_phi", 50, 0, 2.*M_PI); _h_jet_b_jet_pT = bookHisto1D("jet_b_jet_pT", 50, 0, 500); _h_jet_H_eta_using_bb = bookHisto1D("jet_H_eta_using_bb", 50, -4, 4); _h_jet_H_mass_using_bb = 
bookHisto1D("jet_H_mass_using_bb", 50, 50, 200); _h_jet_H_phi_using_bb = bookHisto1D("jet_H_phi_using_bb", 50, 0, 2.*M_PI); _h_jet_H_pT_using_bb = bookHisto1D("jet_H_pT_using_bb", 50, 0, 500); _h_jet_eta = bookHisto1D("jet_eta", 50, -4, 4); _h_jet_multiplicity = bookHisto1D("jet_multiplicity", 11, -0.5, 10.5); _h_jet_phi = bookHisto1D("jet_phi", 50, 0, 2.*M_PI); _h_jet_pT = bookHisto1D("jet_pT", 50, 0, 500); _h_jet_VBbb_Delta_eta = bookHisto1D("jet_VBbb_Delta_eta", 50, 0, 4); _h_jet_VBbb_Delta_phi = bookHisto1D("jet_VBbb_Delta_phi", 50, 0, M_PI); _h_jet_VBbb_Delta_pT = bookHisto1D("jet_VBbb_Delta_pT", 50, 0, 500); _h_jet_VBbb_Delta_R = bookHisto1D("jet_VBbb_Delta_R", 50, 0, 8); _h_VB_eta = bookHisto1D("VB_eta", 50, -4, 4); _h_VB_mass = bookHisto1D("VB_mass", 50, 60, 110); _h_Z_multiplicity = bookHisto1D("Z_multiplicity", 11, -0.5, 10.5); _h_W_multiplicity = bookHisto1D("W_multiplicity", 11, -0.5, 10.5); _h_VB_phi = bookHisto1D("VB_phi", 50, 0, 2.*M_PI); _h_VB_pT = bookHisto1D("VB_pT", 50, 0, 500); _h_jet_bVB_angle_Hframe = bookHisto1D("jet_bVB_angle_Hframe", 50, 0, M_PI); _h_jet_bVB_cosangle_Hframe = bookHisto1D("jet_bVB_cosangle_Hframe", 50, -1, 1); _h_jet_bb_angle_Hframe = bookHisto1D("jet_bb_angle_Hframe", 50, 0, M_PI); _h_jet_bb_cosangle_Hframe = bookHisto1D("jet_bb_cosangle_Hframe", 50, -1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = event.weight(); const double JETPTCUT = 30*GeV; const ZFinder& zeefinder = apply(event, "ZeeFinder"); const ZFinder& zmmfinder = apply(event, "ZmmFinder"); const WFinder& wefinder = apply(event, "WeFinder"); const WFinder& wmfinder = apply(event, "WmFinder"); const Particles vectorBosons = zeefinder.bosons() + zmmfinder.bosons() + wefinder.bosons() + wmfinder.bosons(); _h_Z_multiplicity->fill(zeefinder.bosons().size() + zmmfinder.bosons().size(), weight); _h_W_multiplicity->fill(wefinder.bosons().size() + wmfinder.bosons().size(), weight); const Jets jets = apply(event, 
"AntiKT04").jetsByPt(JETPTCUT); _h_jet_multiplicity->fill(jets.size(), weight); // Identify the b-jets Jets bjets; foreach (const Jet& jet, jets) { const double jetEta = jet.eta(); const double jetPhi = jet.phi(); const double jetPt = jet.pT(); _h_jet_eta->fill(jetEta, weight); _h_jet_phi->fill(jetPhi, weight); _h_jet_pT->fill(jetPt/GeV, weight); if (jet.bTagged() && jet.pT() > JETPTCUT) { bjets.push_back(jet); _h_jet_b_jet_eta->fill( jetEta , weight ); _h_jet_b_jet_phi->fill( jetPhi , weight ); _h_jet_b_jet_pT->fill( jetPt , weight ); } } _h_jet_b_jet_multiplicity->fill(bjets.size(), weight); // Plot vector boson properties foreach (const Particle& v, vectorBosons) { _h_VB_phi->fill(v.phi(), weight); _h_VB_pT->fill(v.pT(), weight); _h_VB_eta->fill(v.eta(), weight); _h_VB_mass->fill(v.mass(), weight); } // rest of analysis requires at least 1 b jets if(bjets.empty()) vetoEvent; // Construct Higgs candidates from pairs of b-jets for (size_t i = 0; i < bjets.size()-1; ++i) { for (size_t j = i+1; j < bjets.size(); ++j) { const Jet& jet1 = bjets[i]; const Jet& jet2 = bjets[j]; const double deltaEtaJJ = fabs(jet1.eta() - jet2.eta()); const double deltaPhiJJ = deltaPhi(jet1.momentum(), jet2.momentum()); const double deltaRJJ = deltaR(jet1.momentum(), jet2.momentum()); const double deltaPtJJ = fabs(jet1.pT() - jet2.pT()); _h_jet_bb_Delta_eta->fill(deltaEtaJJ, weight); _h_jet_bb_Delta_phi->fill(deltaPhiJJ, weight); _h_jet_bb_Delta_pT->fill(deltaPtJJ, weight); _h_jet_bb_Delta_R->fill(deltaRJJ, weight); const FourMomentum phiggs = jet1.momentum() + jet2.momentum(); _h_jet_H_eta_using_bb->fill(phiggs.eta(), weight); _h_jet_H_mass_using_bb->fill(phiggs.mass(), weight); _h_jet_H_phi_using_bb->fill(phiggs.phi(), weight); _h_jet_H_pT_using_bb->fill(phiggs.pT(), weight); foreach (const Particle& v, vectorBosons) { const double deltaEtaVH = fabs(phiggs.eta() - v.eta()); const double deltaPhiVH = deltaPhi(phiggs, v.momentum()); const double deltaRVH = deltaR(phiggs, v.momentum()); 
const double deltaPtVH = fabs(phiggs.pT() - v.pT()); _h_jet_VBbb_Delta_eta->fill(deltaEtaVH, weight); _h_jet_VBbb_Delta_phi->fill(deltaPhiVH, weight); _h_jet_VBbb_Delta_pT->fill(deltaPtVH, weight); _h_jet_VBbb_Delta_R->fill(deltaRVH, weight); // Calculate boost angles const vector angles = boostAngles(jet1.momentum(), jet2.momentum(), v.momentum()); _h_jet_bVB_angle_Hframe->fill(angles[0], weight); _h_jet_bb_angle_Hframe->fill(angles[1], weight); _h_jet_bVB_cosangle_Hframe->fill(cos(angles[0]), weight); _h_jet_bb_cosangle_Hframe->fill(cos(angles[1]), weight); } } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_jet_bb_Delta_eta, crossSection()/sumOfWeights()); scale(_h_jet_bb_Delta_phi, crossSection()/sumOfWeights()); scale(_h_jet_bb_Delta_pT, crossSection()/sumOfWeights()); scale(_h_jet_bb_Delta_R, crossSection()/sumOfWeights()); scale(_h_jet_b_jet_eta, crossSection()/sumOfWeights()); scale(_h_jet_b_jet_multiplicity, crossSection()/sumOfWeights()); scale(_h_jet_b_jet_phi, crossSection()/sumOfWeights()); scale(_h_jet_b_jet_pT, crossSection()/sumOfWeights()); scale(_h_jet_H_eta_using_bb, crossSection()/sumOfWeights()); scale(_h_jet_H_mass_using_bb, crossSection()/sumOfWeights()); scale(_h_jet_H_phi_using_bb, crossSection()/sumOfWeights()); scale(_h_jet_H_pT_using_bb, crossSection()/sumOfWeights()); scale(_h_jet_eta, crossSection()/sumOfWeights()); scale(_h_jet_multiplicity, crossSection()/sumOfWeights()); scale(_h_jet_phi, crossSection()/sumOfWeights()); scale(_h_jet_pT, crossSection()/sumOfWeights()); scale(_h_jet_VBbb_Delta_eta, crossSection()/sumOfWeights()); scale(_h_jet_VBbb_Delta_phi, crossSection()/sumOfWeights()); scale(_h_jet_VBbb_Delta_pT, crossSection()/sumOfWeights()); scale(_h_jet_VBbb_Delta_R, crossSection()/sumOfWeights()); scale(_h_VB_eta, crossSection()/sumOfWeights()); scale(_h_VB_mass, crossSection()/sumOfWeights()); scale(_h_Z_multiplicity, crossSection()/sumOfWeights()); scale(_h_W_multiplicity, 
crossSection()/sumOfWeights()); scale(_h_VB_phi, crossSection()/sumOfWeights()); scale(_h_VB_pT, crossSection()/sumOfWeights()); scale(_h_jet_bVB_angle_Hframe, crossSection()/sumOfWeights()); scale(_h_jet_bb_angle_Hframe, crossSection()/sumOfWeights()); scale(_h_jet_bVB_cosangle_Hframe, crossSection()/sumOfWeights()); scale(_h_jet_bb_cosangle_Hframe, crossSection()/sumOfWeights()); } /// This should take in the four-momenta of two b's (jets/hadrons) and a vector boson, for the process VB*->VBH with H->bb /// It should return the smallest angle between the virtual vector boson and one of the b's, in the rest frame of the Higgs boson. /// It should also return (as the second element of the vector) the angle between the b's, in the rest frame of the Higgs boson. vector boostAngles(const FourMomentum& b1, const FourMomentum& b2, const FourMomentum& vb) { const FourMomentum higgsMomentum = b1 + b2; const FourMomentum virtualVBMomentum = higgsMomentum + vb; const LorentzTransform lt = LorentzTransform::mkFrameTransformFromBeta(higgsMomentum.betaVec()); const FourMomentum virtualVBMomentumBOOSTED = lt.transform(virtualVBMomentum); const FourMomentum b1BOOSTED = lt.transform(b1); const FourMomentum b2BOOSTED = lt.transform(b2); const double angle1 = b1BOOSTED.angle(virtualVBMomentumBOOSTED); const double angle2 = b2BOOSTED.angle(virtualVBMomentumBOOSTED); const double anglebb = b1BOOSTED.angle(b2BOOSTED); vector rtn; rtn.push_back(angle1 < angle2 ? 
angle1 : angle2); rtn.push_back(anglebb); return rtn; } //@} private: /// @name Histograms //@{ Histo1DPtr _h_Z_multiplicity, _h_W_multiplicity; Histo1DPtr _h_jet_bb_Delta_eta, _h_jet_bb_Delta_phi, _h_jet_bb_Delta_pT, _h_jet_bb_Delta_R; Histo1DPtr _h_jet_b_jet_eta, _h_jet_b_jet_multiplicity, _h_jet_b_jet_phi, _h_jet_b_jet_pT; Histo1DPtr _h_jet_H_eta_using_bb, _h_jet_H_mass_using_bb, _h_jet_H_phi_using_bb, _h_jet_H_pT_using_bb; Histo1DPtr _h_jet_eta, _h_jet_multiplicity, _h_jet_phi, _h_jet_pT; Histo1DPtr _h_jet_VBbb_Delta_eta, _h_jet_VBbb_Delta_phi, _h_jet_VBbb_Delta_pT, _h_jet_VBbb_Delta_R; Histo1DPtr _h_VB_eta, _h_VB_mass, _h_VB_phi, _h_VB_pT; Histo1DPtr _h_jet_bVB_angle_Hframe, _h_jet_bb_angle_Hframe, _h_jet_bVB_cosangle_Hframe, _h_jet_bb_cosangle_Hframe; //Histo1DPtr _h_jet_cuts_bb_deltaR_v_HpT; //@} }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(MC_VH2BB); } diff --git a/analyses/pluginMisc/ARGUS_1993_S2653028.cc b/analyses/pluginMisc/ARGUS_1993_S2653028.cc --- a/analyses/pluginMisc/ARGUS_1993_S2653028.cc +++ b/analyses/pluginMisc/ARGUS_1993_S2653028.cc @@ -1,177 +1,177 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BELLE pi+/-, K+/- and proton/antiproton spectrum at Upsilon(4S) /// @author Peter Richardson class ARGUS_1993_S2653028 : public Analysis { public: ARGUS_1993_S2653028() : Analysis("ARGUS_1993_S2653028"), _weightSum(0.) 
{ } void analyze(const Event& e) { const double weight = e.weight(); // Find the upsilons Particles upsilons; // First in unstable final state - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { if (p.pid() == 300553) upsilons.push_back(p); } // Then in whole event if that failed if (upsilons.empty()) { foreach (const GenParticle* p, particles(e.genEvent())) { if (p->pdg_id() != 300553) continue; const GenVertex* pv = p->production_vertex(); bool passed = true; if (pv) { foreach (const GenParticle* pp, particles_in(pv)) { if ( p->pdg_id() == pp->pdg_id() ) { passed = false; break; } } } if (passed) upsilons.push_back(Particle(*p)); } } // Find an upsilon foreach (const Particle& p, upsilons) { _weightSum += weight; vector pionsA,pionsB,protonsA,protonsB,kaons; // Find the decay products we want findDecayProducts(p.genParticle(), pionsA, pionsB, protonsA, protonsB, kaons); LorentzTransform cms_boost; if (p.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec()); for (size_t ix = 0; ix < pionsA.size(); ++ix) { FourMomentum ptemp(pionsA[ix]->momentum()); FourMomentum p2 = cms_boost.transform(ptemp); double pcm = cms_boost.transform(ptemp).vector3().mod(); _histPiA->fill(pcm,weight); } _multPiA->fill(10.58,double(pionsA.size())*weight); for (size_t ix = 0; ix < pionsB.size(); ++ix) { double pcm = cms_boost.transform(FourMomentum(pionsB[ix]->momentum())).vector3().mod(); _histPiB->fill(pcm,weight); } _multPiB->fill(10.58,double(pionsB.size())*weight); for (size_t ix = 0; ix < protonsA.size(); ++ix) { double pcm = cms_boost.transform(FourMomentum(protonsA[ix]->momentum())).vector3().mod(); _histpA->fill(pcm,weight); } _multpA->fill(10.58,double(protonsA.size())*weight); for (size_t ix = 0; ix < protonsB.size(); ++ix) { double pcm = cms_boost.transform(FourMomentum(protonsB[ix]->momentum())).vector3().mod(); _histpB->fill(pcm,weight); 
} _multpB->fill(10.58,double(protonsB.size())*weight); for (size_t ix = 0 ;ix < kaons.size(); ++ix) { double pcm = cms_boost.transform(FourMomentum(kaons[ix]->momentum())).vector3().mod(); _histKA->fill(pcm,weight); _histKB->fill(pcm,weight); } _multK->fill(10.58,double(kaons.size())*weight); } } void finalize() { if (_weightSum > 0.) { scale(_histPiA, 1./_weightSum); scale(_histPiB, 1./_weightSum); scale(_histKA , 1./_weightSum); scale(_histKB , 1./_weightSum); scale(_histpA , 1./_weightSum); scale(_histpB , 1./_weightSum); scale(_multPiA, 1./_weightSum); scale(_multPiB, 1./_weightSum); scale(_multK , 1./_weightSum); scale(_multpA , 1./_weightSum); scale(_multpB , 1./_weightSum); } } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // spectra _histPiA = bookHisto1D(1, 1, 1); _histPiB = bookHisto1D(2, 1, 1); _histKA = bookHisto1D(3, 1, 1); _histKB = bookHisto1D(6, 1, 1); _histpA = bookHisto1D(4, 1, 1); _histpB = bookHisto1D(5, 1, 1); // multiplicities _multPiA = bookHisto1D( 7, 1, 1); _multPiB = bookHisto1D( 8, 1, 1); _multK = bookHisto1D( 9, 1, 1); _multpA = bookHisto1D(10, 1, 1); _multpB = bookHisto1D(11, 1, 1); } // init private: //@{ /// Count of weights double _weightSum; /// Spectra Histo1DPtr _histPiA, _histPiB, _histKA, _histKB, _histpA, _histpB; /// Multiplicities Histo1DPtr _multPiA, _multPiB, _multK, _multpA, _multpB; //@} void findDecayProducts(const GenParticle* p, vector& pionsA, vector& pionsB, vector& protonsA, vector& protonsB, vector& kaons) { int parentId = p->pdg_id(); const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = abs((*pp)->pdg_id()); if (id == PID::PIPLUS) { if (parentId != PID::LAMBDA && parentId != PID::K0S) { pionsA.push_back(*pp); pionsB.push_back(*pp); } else pionsB.push_back(*pp); } else if (id == PID::PROTON) { if (parentId != 
PID::LAMBDA && parentId != PID::K0S) { protonsA.push_back(*pp); protonsB.push_back(*pp); } else protonsB.push_back(*pp); } else if (id == PID::KPLUS) { kaons.push_back(*pp); } else if ((*pp)->end_vertex()) findDecayProducts(*pp, pionsA, pionsB, protonsA, protonsB, kaons); } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ARGUS_1993_S2653028); } diff --git a/analyses/pluginMisc/ARGUS_1993_S2669951.cc b/analyses/pluginMisc/ARGUS_1993_S2669951.cc --- a/analyses/pluginMisc/ARGUS_1993_S2669951.cc +++ b/analyses/pluginMisc/ARGUS_1993_S2669951.cc @@ -1,192 +1,192 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Production of the $\eta'(958)$ and $f_0(980)$ in $e^+e^-$ annihilation in the Upsilon region /// @author Peter Richardson class ARGUS_1993_S2669951 : public Analysis { public: ARGUS_1993_S2669951() : Analysis("ARGUS_1993_S2669951"), _count_etaPrime_highZ(2, 0.), _count_etaPrime_allZ(3, 0.), _count_f0(3, 0.), _weightSum_cont(0.), _weightSum_Ups1(0.), _weightSum_Ups2(0.) { } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _hist_cont_f0 = bookHisto1D(2, 1, 1); _hist_Ups1_f0 = bookHisto1D(3, 1, 1); _hist_Ups2_f0 = bookHisto1D(4, 1, 1); } void analyze(const Event& e) { // Find the Upsilons among the unstables - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); Particles upsilons; // First in unstable final state foreach (const Particle& p, ufs.particles()) if (p.pid() == 553 || p.pid() == 100553) upsilons.push_back(p); // Then in whole event if fails if (upsilons.empty()) { /// @todo Replace HepMC digging with Particle::descendents etc. 
calls foreach (const GenParticle* p, Rivet::particles(e.genEvent())) { if ( p->pdg_id() != 553 && p->pdg_id() != 100553 ) continue; // Discard it if its parent has the same PDG ID code (avoid duplicates) const GenVertex* pv = p->production_vertex(); bool passed = true; if (pv) { foreach (const GenParticle* pp, particles_in(pv)) { if ( p->pdg_id() == pp->pdg_id() ) { passed = false; break; } } } if (passed) upsilons.push_back(Particle(*p)); } } // Finding done, now fill counters const double weight = e.weight(); if (upsilons.empty()) { // Continuum MSG_DEBUG("No Upsilons found => continuum event"); _weightSum_cont += weight; unsigned int nEtaA(0), nEtaB(0), nf0(0); foreach (const Particle& p, ufs.particles()) { const int id = p.abspid(); const double xp = 2.*p.E()/sqrtS(); const double beta = p.p3().mod() / p.E(); if (id == 9010221) { _hist_cont_f0->fill(xp, weight/beta); nf0 += 1; } else if (id == 331) { if (xp > 0.35) nEtaA += 1; nEtaB += 1; } } _count_f0[2] += nf0*weight; _count_etaPrime_highZ[1] += nEtaA*weight; _count_etaPrime_allZ[2] += nEtaB*weight; } else { // Upsilon(s) found MSG_DEBUG("Upsilons found => resonance event"); foreach (const Particle& ups, upsilons) { const int parentId = ups.pid(); ((parentId == 553) ? _weightSum_Ups1 : _weightSum_Ups2) += weight; Particles unstable; // Find the decay products we want findDecayProducts(ups.genParticle(), unstable); LorentzTransform cms_boost; if (ups.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec()); const double mass = ups.mass(); unsigned int nEtaA(0), nEtaB(0), nf0(0); foreach(const Particle& p, unstable) { const int id = p.abspid(); const FourMomentum p2 = cms_boost.transform(p.momentum()); const double xp = 2.*p2.E()/mass; const double beta = p2.p3().mod()/p2.E(); if (id == 9010221) { //< ? ((parentId == 553) ? _hist_Ups1_f0 : _hist_Ups2_f0)->fill(xp, weight/beta); nf0 += 1; } else if (id == 331) { //< ? 
if (xp > 0.35) nEtaA += 1; nEtaB += 1; } } if (parentId == 553) { _count_f0[0] += nf0*weight; _count_etaPrime_highZ[0] += nEtaA*weight; _count_etaPrime_allZ[0] += nEtaB*weight; } else { _count_f0[1] += nf0*weight; _count_etaPrime_allZ[1] += nEtaB*weight; } } } } void finalize() { // High-Z eta' multiplicity Scatter2DPtr s111 = bookScatter2D(1, 1, 1, true); if (_weightSum_Ups1 > 0) // Point at 9.460 s111->point(0).setY(_count_etaPrime_highZ[0] / _weightSum_Ups1, 0); if (_weightSum_cont > 0) // Point at 9.905 s111->point(1).setY(_count_etaPrime_highZ[1] / _weightSum_cont, 0); // All-Z eta' multiplicity Scatter2DPtr s112 = bookScatter2D(1, 1, 2, true); if (_weightSum_Ups1 > 0) // Point at 9.460 s112->point(0).setY(_count_etaPrime_allZ[0] / _weightSum_Ups1, 0); if (_weightSum_cont > 0) // Point at 9.905 s112->point(1).setY(_count_etaPrime_allZ[2] / _weightSum_cont, 0); if (_weightSum_Ups2 > 0) // Point at 10.02 s112->point(2).setY(_count_etaPrime_allZ[1] / _weightSum_Ups2, 0); // f0 multiplicity Scatter2DPtr s511 = bookScatter2D(5, 1, 1, true); if (_weightSum_Ups1 > 0) // Point at 9.46 s511->point(0).setY(_count_f0[0] / _weightSum_Ups1, 0); if (_weightSum_Ups2 > 0) // Point at 10.02 s511->point(1).setY(_count_f0[1] / _weightSum_Ups2, 0); if (_weightSum_cont > 0) // Point at 10.45 s511->point(2).setY(_count_f0[2] / _weightSum_cont, 0); // Scale histos if (_weightSum_cont > 0.) scale(_hist_cont_f0, 1./_weightSum_cont); if (_weightSum_Ups1 > 0.) scale(_hist_Ups1_f0, 1./_weightSum_Ups1); if (_weightSum_Ups2 > 0.) 
scale(_hist_Ups2_f0, 1./_weightSum_Ups2); } private: /// @name Counters //@{ vector _count_etaPrime_highZ, _count_etaPrime_allZ, _count_f0; double _weightSum_cont,_weightSum_Ups1,_weightSum_Ups2; //@} /// Histos Histo1DPtr _hist_cont_f0, _hist_Ups1_f0, _hist_Ups2_f0; /// Recursively walk the HepMC tree to find decay products of @a p void findDecayProducts(const GenParticle* p, Particles& unstable) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { const int id = abs((*pp)->pdg_id()); if (id == 331 || id == 9010221) unstable.push_back(Particle(*pp)); else if ((*pp)->end_vertex()) findDecayProducts(*pp, unstable); } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ARGUS_1993_S2669951); } diff --git a/analyses/pluginMisc/ARGUS_1993_S2789213.cc b/analyses/pluginMisc/ARGUS_1993_S2789213.cc --- a/analyses/pluginMisc/ARGUS_1993_S2789213.cc +++ b/analyses/pluginMisc/ARGUS_1993_S2789213.cc @@ -1,256 +1,256 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief ARGUS vector meson production /// @author Peter Richardson class ARGUS_1993_S2789213 : public Analysis { public: ARGUS_1993_S2789213() : Analysis("ARGUS_1993_S2789213"), _weightSum_cont(0.),_weightSum_Ups1(0.),_weightSum_Ups4(0.) 
{ } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _mult_cont_Omega = bookHisto1D( 1, 1, 1); _mult_cont_Rho0 = bookHisto1D( 1, 1, 2); _mult_cont_KStar0 = bookHisto1D( 1, 1, 3); _mult_cont_KStarPlus = bookHisto1D( 1, 1, 4); _mult_cont_Phi = bookHisto1D( 1, 1, 5); _mult_Ups1_Omega = bookHisto1D( 2, 1, 1); _mult_Ups1_Rho0 = bookHisto1D( 2, 1, 2); _mult_Ups1_KStar0 = bookHisto1D( 2, 1, 3); _mult_Ups1_KStarPlus = bookHisto1D( 2, 1, 4); _mult_Ups1_Phi = bookHisto1D( 2, 1, 5); _mult_Ups4_Omega = bookHisto1D( 3, 1, 1); _mult_Ups4_Rho0 = bookHisto1D( 3, 1, 2); _mult_Ups4_KStar0 = bookHisto1D( 3, 1, 3); _mult_Ups4_KStarPlus = bookHisto1D( 3, 1, 4); _mult_Ups4_Phi = bookHisto1D( 3, 1, 5); _hist_cont_KStarPlus = bookHisto1D( 4, 1, 1); _hist_Ups1_KStarPlus = bookHisto1D( 5, 1, 1); _hist_Ups4_KStarPlus = bookHisto1D( 6, 1, 1); _hist_cont_KStar0 = bookHisto1D( 7, 1, 1); _hist_Ups1_KStar0 = bookHisto1D( 8, 1, 1); _hist_Ups4_KStar0 = bookHisto1D( 9, 1, 1); _hist_cont_Rho0 = bookHisto1D(10, 1, 1); _hist_Ups1_Rho0 = bookHisto1D(11, 1, 1); _hist_Ups4_Rho0 = bookHisto1D(12, 1, 1); _hist_cont_Omega = bookHisto1D(13, 1, 1); _hist_Ups1_Omega = bookHisto1D(14, 1, 1); } void analyze(const Event& e) { const double weight = e.weight(); // Find the upsilons Particles upsilons; // First in unstable final state - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) if (p.pid() == 300553 || p.pid() == 553) upsilons.push_back(p); // Then in whole event if that failed if (upsilons.empty()) { foreach (const GenParticle* p, Rivet::particles(e.genEvent())) { if (p->pdg_id() != 300553 && p->pdg_id() != 553) continue; const GenVertex* pv = p->production_vertex(); bool passed = true; if (pv) { foreach (const GenParticle* pp, particles_in(pv)) { if ( p->pdg_id() == pp->pdg_id() ) { passed = false; break; } } } if (passed) upsilons.push_back(Particle(*p)); } } if 
(upsilons.empty()) { // continuum _weightSum_cont += weight; unsigned int nOmega(0), nRho0(0), nKStar0(0), nKStarPlus(0), nPhi(0); foreach (const Particle& p, ufs.particles()) { int id = p.abspid(); double xp = 2.*p.E()/sqrtS(); double beta = p.p3().mod()/p.E(); if (id == 113) { _hist_cont_Rho0->fill(xp, weight/beta); ++nRho0; } else if (id == 313) { _hist_cont_KStar0->fill(xp, weight/beta); ++nKStar0; } else if (id == 223) { _hist_cont_Omega->fill(xp, weight/beta); ++nOmega; } else if (id == 323) { _hist_cont_KStarPlus->fill(xp,weight/beta); ++nKStarPlus; } else if (id == 333) { ++nPhi; } } /// @todo Replace with Counters and fill one-point Scatters at the end _mult_cont_Omega ->fill(10.45, weight*nOmega ); _mult_cont_Rho0 ->fill(10.45, weight*nRho0 ); _mult_cont_KStar0 ->fill(10.45, weight*nKStar0 ); _mult_cont_KStarPlus->fill(10.45, weight*nKStarPlus); _mult_cont_Phi ->fill(10.45, weight*nPhi ); } else { // found an upsilon foreach (const Particle& ups, upsilons) { const int parentId = ups.pid(); (parentId == 553 ? 
_weightSum_Ups1 : _weightSum_Ups4) += weight; Particles unstable; // Find the decay products we want findDecayProducts(ups.genParticle(),unstable); /// @todo Update to new LT mk* functions LorentzTransform cms_boost; if (ups.p3().mod() > 0.001) cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec()); double mass = ups.mass(); unsigned int nOmega(0),nRho0(0),nKStar0(0),nKStarPlus(0),nPhi(0); foreach(const Particle & p , unstable) { int id = p.abspid(); FourMomentum p2 = cms_boost.transform(p.momentum()); double xp = 2.*p2.E()/mass; double beta = p2.p3().mod()/p2.E(); if (id == 113) { if (parentId == 553) _hist_Ups1_Rho0->fill(xp,weight/beta); else _hist_Ups4_Rho0->fill(xp,weight/beta); ++nRho0; } else if (id == 313) { if (parentId == 553) _hist_Ups1_KStar0->fill(xp,weight/beta); else _hist_Ups4_KStar0->fill(xp,weight/beta); ++nKStar0; } else if (id == 223) { if (parentId == 553) _hist_Ups1_Omega->fill(xp,weight/beta); ++nOmega; } else if (id == 323) { if (parentId == 553) _hist_Ups1_KStarPlus->fill(xp,weight/beta); else _hist_Ups4_KStarPlus->fill(xp,weight/beta); ++nKStarPlus; } else if (id == 333) { ++nPhi; } } if (parentId == 553) { _mult_Ups1_Omega ->fill(9.46,weight*nOmega ); _mult_Ups1_Rho0 ->fill(9.46,weight*nRho0 ); _mult_Ups1_KStar0 ->fill(9.46,weight*nKStar0 ); _mult_Ups1_KStarPlus->fill(9.46,weight*nKStarPlus); _mult_Ups1_Phi ->fill(9.46,weight*nPhi ); } else { _mult_Ups4_Omega ->fill(10.58,weight*nOmega ); _mult_Ups4_Rho0 ->fill(10.58,weight*nRho0 ); _mult_Ups4_KStar0 ->fill(10.58,weight*nKStar0 ); _mult_Ups4_KStarPlus->fill(10.58,weight*nKStarPlus); _mult_Ups4_Phi ->fill(10.58,weight*nPhi ); } } } } void finalize() { if (_weightSum_cont > 0.) 
{ /// @todo Replace with Counters and fill one-point Scatters at the end scale(_mult_cont_Omega , 1./_weightSum_cont); scale(_mult_cont_Rho0 , 1./_weightSum_cont); scale(_mult_cont_KStar0 , 1./_weightSum_cont); scale(_mult_cont_KStarPlus, 1./_weightSum_cont); scale(_mult_cont_Phi , 1./_weightSum_cont); scale(_hist_cont_KStarPlus, 1./_weightSum_cont); scale(_hist_cont_KStar0 , 1./_weightSum_cont); scale(_hist_cont_Rho0 , 1./_weightSum_cont); scale(_hist_cont_Omega , 1./_weightSum_cont); } if (_weightSum_Ups1 > 0.) { /// @todo Replace with Counters and fill one-point Scatters at the end scale(_mult_Ups1_Omega , 1./_weightSum_Ups1); scale(_mult_Ups1_Rho0 , 1./_weightSum_Ups1); scale(_mult_Ups1_KStar0 , 1./_weightSum_Ups1); scale(_mult_Ups1_KStarPlus, 1./_weightSum_Ups1); scale(_mult_Ups1_Phi , 1./_weightSum_Ups1); scale(_hist_Ups1_KStarPlus, 1./_weightSum_Ups1); scale(_hist_Ups1_KStar0 , 1./_weightSum_Ups1); scale(_hist_Ups1_Rho0 , 1./_weightSum_Ups1); scale(_hist_Ups1_Omega , 1./_weightSum_Ups1); } if (_weightSum_Ups4 > 0.) 
{ /// @todo Replace with Counters and fill one-point Scatters at the end scale(_mult_Ups4_Omega , 1./_weightSum_Ups4); scale(_mult_Ups4_Rho0 , 1./_weightSum_Ups4); scale(_mult_Ups4_KStar0 , 1./_weightSum_Ups4); scale(_mult_Ups4_KStarPlus, 1./_weightSum_Ups4); scale(_mult_Ups4_Phi , 1./_weightSum_Ups4); scale(_hist_Ups4_KStarPlus, 1./_weightSum_Ups4); scale(_hist_Ups4_KStar0 , 1./_weightSum_Ups4); scale(_hist_Ups4_Rho0 , 1./_weightSum_Ups4); } } private: //@{ Histo1DPtr _mult_cont_Omega, _mult_cont_Rho0, _mult_cont_KStar0, _mult_cont_KStarPlus, _mult_cont_Phi; Histo1DPtr _mult_Ups1_Omega, _mult_Ups1_Rho0, _mult_Ups1_KStar0, _mult_Ups1_KStarPlus, _mult_Ups1_Phi; Histo1DPtr _mult_Ups4_Omega, _mult_Ups4_Rho0, _mult_Ups4_KStar0, _mult_Ups4_KStarPlus, _mult_Ups4_Phi; Histo1DPtr _hist_cont_KStarPlus, _hist_Ups1_KStarPlus, _hist_Ups4_KStarPlus; Histo1DPtr _hist_cont_KStar0, _hist_Ups1_KStar0, _hist_Ups4_KStar0 ; Histo1DPtr _hist_cont_Rho0, _hist_Ups1_Rho0, _hist_Ups4_Rho0; Histo1DPtr _hist_cont_Omega, _hist_Ups1_Omega; double _weightSum_cont,_weightSum_Ups1,_weightSum_Ups4; //@} void findDecayProducts(const GenParticle* p, Particles& unstable) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = abs((*pp)->pdg_id()); if (id == 113 || id == 313 || id == 323 || id == 333 || id == 223 ) { unstable.push_back(Particle(*pp)); } else if ((*pp)->end_vertex()) findDecayProducts(*pp, unstable); } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ARGUS_1993_S2789213); } diff --git a/analyses/pluginMisc/BABAR_2003_I593379.cc b/analyses/pluginMisc/BABAR_2003_I593379.cc --- a/analyses/pluginMisc/BABAR_2003_I593379.cc +++ b/analyses/pluginMisc/BABAR_2003_I593379.cc @@ -1,186 +1,186 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include 
"Rivet/Projections/UnstableParticles.hh"

namespace Rivet {


  /// @brief Babar charmonium spectra
  /// @author Peter Richardson
  class BABAR_2003_I593379 : public Analysis {
  public:

    BABAR_2003_I593379()
      : Analysis("BABAR_2003_I593379"), _weightSum(0.)
    { }


    /// Per-event analysis: find the Upsilon(4S), collect its charmonium
    /// decay products and fill CMS-momentum spectra and multiplicities.
    void analyze(const Event& e) {
      const double weight = e.weight();

      // Find the charmonia
      Particles upsilons;
      // First in unstable final state
      const UnstableParticles& ufs = apply<UnstableParticles>(e, "UFS");
      foreach (const Particle& p, ufs.particles())
        if (p.pid() == 300553) upsilons.push_back(p);
      // Then in whole event if that failed
      if (upsilons.empty()) {
        foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
          if (p->pdg_id() != 300553) continue;
          // Avoid double-counting: skip if a parent carries the same PDG ID
          const GenVertex* pv = p->production_vertex();
          bool passed = true;
          if (pv) {
            foreach (const GenParticle* pp, particles_in(pv)) {
              if ( p->pdg_id() == pp->pdg_id() ) {
                passed = false;
                break;
              }
            }
          }
          if (passed) upsilons.push_back(Particle(*p));
        }
      }

      // Find upsilons
      foreach (const Particle& p, upsilons) {
        _weightSum += weight;
        // Find the charmonium resonances
        /// @todo Use Rivet::Particles
        vector<const GenParticle*> allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2;
        findDecayProducts(p.genParticle(), allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2);
        const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.mom().betaVec());
        for (size_t i = 0; i < allJpsi.size(); i++) {
          const double pcm = cms_boost.transform(FourMomentum(allJpsi[i]->momentum())).p();
          _hist_all_Jpsi->fill(pcm, weight);
        }
        _mult_JPsi->fill(10.58, weight*double(allJpsi.size()));
        for (size_t i = 0; i < primaryJpsi.size(); i++) {
          const double pcm = cms_boost.transform(FourMomentum(primaryJpsi[i]->momentum())).p();
          _hist_primary_Jpsi->fill(pcm, weight);
        }
        _mult_JPsi_direct->fill(10.58, weight*double(primaryJpsi.size()));
        // NOTE(reconstruction): this loop header/subscript was garbled in the
        // extracted text; restored by analogy with the sibling loops and the
        // following _hist_Psi_prime/_mult_Psi2S fills -- confirm against the
        // original BABAR_2003_I593379.cc.
        for (size_t i = 0; i < Psiprime.size(); i++) {
          const double pcm = cms_boost.transform(FourMomentum(Psiprime[i]->momentum())).p();
          _hist_Psi_prime->fill(pcm, weight);
        }
        _mult_Psi2S->fill(10.58, weight*double(Psiprime.size()));
        for (size_t i = 0; i < all_chi_c1.size(); i++) {
          const double pcm = cms_boost.transform(FourMomentum(all_chi_c1[i]->momentum())).p();
          _hist_chi_c1->fill(pcm, weight);
        }
        _mult_chi_c1->fill(10.58, weight*double(all_chi_c1.size()));
        _mult_chi_c1_direct->fill(10.58, weight*double(primary_chi_c1.size()));
        for (size_t i = 0; i < all_chi_c2.size(); i++) {
          const double pcm = cms_boost.transform(FourMomentum(all_chi_c2[i]->momentum())).p();
          _hist_chi_c2->fill(pcm, weight);
        }
        _mult_chi_c2->fill(10.58, weight*double(all_chi_c2.size()));
        _mult_chi_c2_direct->fill(10.58, weight*double(primary_chi_c2.size()));
      }
    } // analyze


    /// Scale spectra and multiplicities by the summed Upsilon weight.
    void finalize() {
      scale(_hist_all_Jpsi     , 0.5*0.1/_weightSum);
      scale(_hist_chi_c1       , 0.5*0.1/_weightSum);
      scale(_hist_chi_c2       , 0.5*0.1/_weightSum);
      scale(_hist_Psi_prime    , 0.5*0.1/_weightSum);
      scale(_hist_primary_Jpsi , 0.5*0.1/_weightSum);
      scale(_mult_JPsi         , 0.5*100./_weightSum);
      scale(_mult_JPsi_direct  , 0.5*100./_weightSum);
      scale(_mult_chi_c1       , 0.5*100./_weightSum);
      scale(_mult_chi_c1_direct, 0.5*100./_weightSum);
      scale(_mult_chi_c2       , 0.5*100./_weightSum);
      scale(_mult_chi_c2_direct, 0.5*100./_weightSum);
      scale(_mult_Psi2S        , 0.5*100./_weightSum);
    } // finalize


    /// Book projections and histograms.
    void init() {
      declare(UnstableParticles(), "UFS");

      _mult_JPsi          = bookHisto1D(1, 1, 1);
      _mult_JPsi_direct   = bookHisto1D(1, 1, 2);
      _mult_chi_c1        = bookHisto1D(1, 1, 3);
      _mult_chi_c1_direct = bookHisto1D(1, 1, 4);
      _mult_chi_c2        = bookHisto1D(1, 1, 5);
      _mult_chi_c2_direct = bookHisto1D(1, 1, 6);
      _mult_Psi2S         = bookHisto1D(1, 1, 7);
      _hist_all_Jpsi      = bookHisto1D(6, 1, 1);
      _hist_chi_c1        = bookHisto1D(7, 1, 1);
      _hist_chi_c2        = bookHisto1D(7, 1, 2);
      _hist_Psi_prime     = bookHisto1D(8, 1, 1);
      _hist_primary_Jpsi  = bookHisto1D(10, 1, 1);
    } // init


  private:

    //@{
    // count of weights
    double _weightSum;
    /// Histograms
    Histo1DPtr _hist_all_Jpsi;
    Histo1DPtr _hist_chi_c1;
    Histo1DPtr _hist_chi_c2;
    Histo1DPtr _hist_Psi_prime;
Histo1DPtr _hist_primary_Jpsi; Histo1DPtr _mult_JPsi; Histo1DPtr _mult_JPsi_direct; Histo1DPtr _mult_chi_c1; Histo1DPtr _mult_chi_c1_direct; Histo1DPtr _mult_chi_c2; Histo1DPtr _mult_chi_c2_direct; Histo1DPtr _mult_Psi2S; //@} void findDecayProducts(const GenParticle* p, vector& allJpsi, vector& primaryJpsi, vector& Psiprime, vector& all_chi_c1, vector& all_chi_c2, vector& primary_chi_c1, vector& primary_chi_c2) { const GenVertex* dv = p->end_vertex(); bool isOnium = false; /// @todo Use better looping for (GenVertex::particles_in_const_iterator pp = dv->particles_in_const_begin() ; pp != dv->particles_in_const_end() ; ++pp) { int id = (*pp)->pdg_id(); id = id%1000; id -= id%10; id /= 10; if (id==44) isOnium = true; } /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = (*pp)->pdg_id(); if (id==100443) { Psiprime.push_back(*pp); } else if (id==20443) { all_chi_c1.push_back(*pp); if (!isOnium) primary_chi_c1.push_back(*pp); } else if (id==445) { all_chi_c2.push_back(*pp); if (!isOnium) primary_chi_c2.push_back(*pp); } else if (id==443) { allJpsi.push_back(*pp); if (!isOnium) primaryJpsi.push_back(*pp); } if ((*pp)->end_vertex()) { findDecayProducts(*pp, allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2); } } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2003_I593379); } diff --git a/analyses/pluginMisc/BABAR_2005_S6181155.cc b/analyses/pluginMisc/BABAR_2005_S6181155.cc --- a/analyses/pluginMisc/BABAR_2005_S6181155.cc +++ b/analyses/pluginMisc/BABAR_2005_S6181155.cc @@ -1,145 +1,145 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BABAR Xi_c baryons from fragmentation /// @author Peter Richardson class BABAR_2005_S6181155 : public 
Analysis {
  public:

    BABAR_2005_S6181155()
      : Analysis("BABAR_2005_S6181155")
    { }


    /// Book projections and histograms.
    void init() {
      declare(Beam(), "Beams");
      declare(UnstableParticles(), "UFS");

      _histOnResonanceA = bookHisto1D(1,1,1);
      _histOnResonanceB = bookHisto1D(2,1,1);
      _histOffResonance = bookHisto1D(2,1,2);
      _sigma = bookHisto1D(3,1,1);
      _histOnResonanceA_norm = bookHisto1D(4,1,1);
      _histOnResonanceB_norm = bookHisto1D(5,1,1);
      _histOffResonance_norm = bookHisto1D(5,1,2);
    }


    /// Per-event analysis: fill Xi_c^0 CMS-momentum spectra, separately for
    /// on-resonance (sqrt(s) ~ 10.58 GeV) and off-resonance running.
    void analyze(const Event& e) {
      const double weight = e.weight();

      // Loop through unstable FS particles and look for charmed mesons/baryons
      const UnstableParticles& ufs = apply<UnstableParticles>(e, "UFS");
      const Beam beamproj = apply<Beam>(e, "Beams");
      const ParticlePair& beams = beamproj.beams();
      const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
      const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
      const double s = sqr(beamproj.sqrtS());
      const bool onresonance = fuzzyEquals(beamproj.sqrtS()/GeV, 10.58, 2E-3);
      foreach (const Particle& p, ufs.particles()) {
        // 3-momentum in CMS frame
        const double mom = cms_boost.transform(p.momentum()).vector3().mod();
        // Only looking at Xi_c^0
        if (p.abspid() != 4132 ) continue;
        if (onresonance) {
          _histOnResonanceA_norm->fill(mom, weight);
          _histOnResonanceB_norm->fill(mom, weight);
        }
        else {
          _histOffResonance_norm->fill(mom, s/sqr(10.58)*weight);
        }
        MSG_DEBUG("mom = " << mom);
        // off-resonance cross section
        if (checkDecay(p.genParticle())) {
          if (onresonance) {
            _histOnResonanceA->fill(mom, weight);
            _histOnResonanceB->fill(mom, weight);
          }
          else {
            _histOffResonance->fill(mom, s/sqr(10.58)*weight);
            _sigma->fill(10.6, weight);
          }
        }
      }
    }


    /// Convert to cross-sections and normalise the shape histograms.
    void finalize() {
      scale(_histOnResonanceA, crossSection()/femtobarn/sumOfWeights());
      scale(_histOnResonanceB, crossSection()/femtobarn/sumOfWeights());
      scale(_histOffResonance, crossSection()/femtobarn/sumOfWeights());
      scale(_sigma , 
crossSection()/femtobarn/sumOfWeights()); normalize(_histOnResonanceA_norm); normalize(_histOnResonanceB_norm); normalize(_histOffResonance_norm); } private: //@{ /// Histograms Histo1DPtr _histOnResonanceA; Histo1DPtr _histOnResonanceB; Histo1DPtr _histOffResonance; Histo1DPtr _sigma ; Histo1DPtr _histOnResonanceA_norm; Histo1DPtr _histOnResonanceB_norm; Histo1DPtr _histOffResonance_norm; //@} bool checkDecay(const GenParticle* p) { unsigned int nstable = 0, npip = 0, npim = 0; unsigned int nXim = 0, nXip = 0; findDecayProducts(p, nstable, npip, npim, nXip, nXim); int id = p->pdg_id(); // Xi_c if (id == 4132) { if (nstable == 2 && nXim == 1 && npip == 1) return true; } else if (id == -4132) { if (nstable == 2 && nXip == 1 && npim == 1) return true; } return false; } void findDecayProducts(const GenParticle* p, unsigned int& nstable, unsigned int& npip, unsigned int& npim, unsigned int& nXip, unsigned int& nXim) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = (*pp)->pdg_id(); if (id==3312) { ++nXim; ++nstable; } else if (id == -3312) { ++nXip; ++nstable; } else if(id == 111 || id == 221) { ++nstable; } else if ((*pp)->end_vertex()) { findDecayProducts(*pp, nstable, npip, npim, nXip, nXim); } else { if (id != 22) ++nstable; if (id == 211) ++npip; else if(id == -211) ++npim; } } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2005_S6181155); } diff --git a/analyses/pluginMisc/BABAR_2007_S6895344.cc b/analyses/pluginMisc/BABAR_2007_S6895344.cc --- a/analyses/pluginMisc/BABAR_2007_S6895344.cc +++ b/analyses/pluginMisc/BABAR_2007_S6895344.cc @@ -1,86 +1,86 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BABAR Lambda_c from 
fragmentation /// @author Peter Richardson class BABAR_2007_S6895344 : public Analysis { public: BABAR_2007_S6895344() : Analysis("BABAR_2007_S6895344") { } void init() { declare(Beam(), "Beams"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histOff = bookHisto1D(1,1,1); _sigmaOff = bookHisto1D(2,1,1); _histOn = bookHisto1D(3,1,1); _sigmaOn = bookHisto1D(4,1,1); } void analyze(const Event& e) { const double weight = e.weight(); // Loop through unstable FS particles and look for charmed mesons/baryons - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); const Beam beamproj = apply(e, "Beams"); const ParticlePair& beams = beamproj.beams(); const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum(); const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec()); const double s = sqr(beamproj.sqrtS()); const bool onresonance = fuzzyEquals(beamproj.sqrtS(), 10.58, 2E-3); // Particle masses from PDGlive (accessed online 16. Nov. 2009). 
foreach (const Particle& p, ufs.particles()) { // Only looking at Lambda_c if (p.abspid() != 4122) continue; MSG_DEBUG("Lambda_c found"); const double mH2 = 5.22780; // 2.28646^2 const double mom = FourMomentum(cms_boost.transform(p.momentum())).p(); const double xp = mom/sqrt(s/4.0 - mH2); if (onresonance) { _histOn ->fill(xp,weight); _sigmaOn ->fill(10.58, weight); } else { _histOff ->fill(xp,weight); _sigmaOff->fill(10.54, weight); } } } void finalize() { scale(_sigmaOn , 1./sumOfWeights()); scale(_sigmaOff, 1./sumOfWeights()); scale(_histOn , 1./sumOfWeights()); scale(_histOff , 1./sumOfWeights()); } private: //@{ // Histograms for the continuum cross sections Histo1DPtr _sigmaOn ; Histo1DPtr _sigmaOff; Histo1DPtr _histOn ; Histo1DPtr _histOff ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2007_S6895344); } diff --git a/analyses/pluginMisc/BABAR_2007_S7266081.cc b/analyses/pluginMisc/BABAR_2007_S7266081.cc --- a/analyses/pluginMisc/BABAR_2007_S7266081.cc +++ b/analyses/pluginMisc/BABAR_2007_S7266081.cc @@ -1,181 +1,181 @@ // -*- C++ -*- #include #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BABAR tau lepton to three charged hadrons /// @author Peter Richardson class BABAR_2007_S7266081 : public Analysis { public: BABAR_2007_S7266081() : Analysis("BABAR_2007_S7266081"), _weight_total(0), _weight_pipipi(0), _weight_Kpipi(0), _weight_KpiK(0), _weight_KKK(0) { } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _hist_pipipi_pipipi = bookHisto1D( 1, 1, 1); _hist_pipipi_pipi = bookHisto1D( 2, 1, 1); _hist_Kpipi_Kpipi = bookHisto1D( 3, 1, 1); _hist_Kpipi_Kpi = bookHisto1D( 4, 1, 1); _hist_Kpipi_pipi = bookHisto1D( 5, 1, 1); _hist_KpiK_KpiK = bookHisto1D( 6, 1, 1); _hist_KpiK_KK = bookHisto1D( 7, 1, 1); _hist_KpiK_piK = bookHisto1D( 8, 1, 1); _hist_KKK_KKK = bookHisto1D( 9, 1, 1); 
_hist_KKK_KK = bookHisto1D(10, 1, 1); } void analyze(const Event& e) { double weight = e.weight(); // Find the taus Particles taus; - foreach(const Particle& p, apply(e, "UFS").particles(Cuts::pid==PID::TAU)) { + foreach(const Particle& p, apply(e, "UFS").particles(Cuts::pid==PID::TAU)) { _weight_total += weight; Particles pip, pim, Kp, Km; unsigned int nstable = 0; // Get the boost to the rest frame LorentzTransform cms_boost; if (p.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec()); // Find the decay products we want findDecayProducts(p.genParticle(), nstable, pip, pim, Kp, Km); if (p.pid() < 0) { swap(pip, pim); swap(Kp, Km ); } if (nstable != 4) continue; // pipipi if (pim.size() == 2 && pip.size() == 1) { _weight_pipipi += weight; _hist_pipipi_pipipi-> fill((pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(), weight); _hist_pipipi_pipi-> fill((pip[0].momentum()+pim[0].momentum()).mass(), weight); _hist_pipipi_pipi-> fill((pip[0].momentum()+pim[1].momentum()).mass(), weight); } else if (pim.size() == 1 && pip.size() == 1 && Km.size() == 1) { _weight_Kpipi += weight; _hist_Kpipi_Kpipi-> fill((pim[0].momentum()+pip[0].momentum()+Km[0].momentum()).mass(), weight); _hist_Kpipi_Kpi-> fill((pip[0].momentum()+Km[0].momentum()).mass(), weight); _hist_Kpipi_pipi-> fill((pim[0].momentum()+pip[0].momentum()).mass(), weight); } else if (Kp.size() == 1 && Km.size() == 1 && pim.size() == 1) { _weight_KpiK += weight; _hist_KpiK_KpiK-> fill((Kp[0].momentum()+Km[0].momentum()+pim[0].momentum()).mass(), weight); _hist_KpiK_KK-> fill((Kp[0].momentum()+Km[0].momentum()).mass(), weight); _hist_KpiK_piK-> fill((Kp[0].momentum()+pim[0].momentum()).mass(), weight); } else if (Kp.size() == 1 && Km.size() == 2) { _weight_KKK += weight; _hist_KKK_KKK-> fill((Kp[0].momentum()+Km[0].momentum()+Km[1].momentum()).mass(), weight); _hist_KKK_KK-> fill((Kp[0].momentum()+Km[0].momentum()).mass(), weight); _hist_KKK_KK-> 
fill((Kp[0].momentum()+Km[1].momentum()).mass(), weight); } } } void finalize() { if (_weight_pipipi > 0.) { scale(_hist_pipipi_pipipi, 1.0/_weight_pipipi); scale(_hist_pipipi_pipi , 0.5/_weight_pipipi); } if (_weight_Kpipi > 0.) { scale(_hist_Kpipi_Kpipi , 1.0/_weight_Kpipi); scale(_hist_Kpipi_Kpi , 1.0/_weight_Kpipi); scale(_hist_Kpipi_pipi , 1.0/_weight_Kpipi); } if (_weight_KpiK > 0.) { scale(_hist_KpiK_KpiK , 1.0/_weight_KpiK); scale(_hist_KpiK_KK , 1.0/_weight_KpiK); scale(_hist_KpiK_piK , 1.0/_weight_KpiK); } if (_weight_KKK > 0.) { scale(_hist_KKK_KKK , 1.0/_weight_KKK); scale(_hist_KKK_KK , 0.5/_weight_KKK); } /// @note Using autobooking for these scatters since their x values are not really obtainable from the MC data bookScatter2D(11, 1, 1, true)->point(0).setY(100*_weight_pipipi/_weight_total, 100*sqrt(_weight_pipipi)/_weight_total); bookScatter2D(12, 1, 1, true)->point(0).setY(100*_weight_Kpipi/_weight_total, 100*sqrt(_weight_Kpipi)/_weight_total); bookScatter2D(13, 1, 1, true)->point(0).setY(100*_weight_KpiK/_weight_total, 100*sqrt(_weight_KpiK)/_weight_total); bookScatter2D(14, 1, 1, true)->point(0).setY(100*_weight_KKK/_weight_total, 100*sqrt(_weight_KKK)/_weight_total); } private: //@{ // Histograms Histo1DPtr _hist_pipipi_pipipi, _hist_pipipi_pipi; Histo1DPtr _hist_Kpipi_Kpipi, _hist_Kpipi_Kpi, _hist_Kpipi_pipi; Histo1DPtr _hist_KpiK_KpiK, _hist_KpiK_KK, _hist_KpiK_piK; Histo1DPtr _hist_KKK_KKK, _hist_KKK_KK; // Weights counters double _weight_total, _weight_pipipi, _weight_Kpipi, _weight_KpiK, _weight_KKK; //@} void findDecayProducts(const GenParticle* p, unsigned int & nstable, Particles& pip, Particles& pim, Particles& Kp, Particles& Km) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = (*pp)->pdg_id(); if (id == PID::PI0 ) ++nstable; else if (id == PID::K0S) ++nstable; else if (id == 
PID::PIPLUS) { pip.push_back(Particle(**pp)); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(Particle(**pp)); ++nstable; } else if (id == PID::KPLUS) { Kp.push_back(Particle(**pp)); ++nstable; } else if (id == PID::KMINUS) { Km.push_back(Particle(**pp)); ++nstable; } else if ((*pp)->end_vertex()) { findDecayProducts(*pp, nstable, pip, pim, Kp, Km); } else ++nstable; } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2007_S7266081); } diff --git a/analyses/pluginMisc/BABAR_2013_I1116411.cc b/analyses/pluginMisc/BABAR_2013_I1116411.cc --- a/analyses/pluginMisc/BABAR_2013_I1116411.cc +++ b/analyses/pluginMisc/BABAR_2013_I1116411.cc @@ -1,84 +1,84 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BABAR_2013_I1116411 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BABAR_2013_I1116411); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_q2 = bookHisto1D(1, 1, 1); } // Calculate the Q2 using mother and daughter charged lepton double q2(const Particle& B) { const Particle chlept = filter_select(B.children(), Cuts::pid==PID::POSITRON || Cuts::pid==PID::ANTIMUON)[0]; FourMomentum q = B.mom() - chlept.mom(); return q*q; } // Check for explicit decay into pdgids bool isSemileptonicDecay(const Particle& mother, vector ids) { // Trivial check to ignore any other decays but the one in question modulo photons const Particles children = mother.children(Cuts::pid!=PID::PHOTON); if (children.size()!=ids.size()) return false; // Check for the explicit decay return all(ids, [&](int i){return count(children, hasPID(i))==1;}); } /// Perform the per-event analysis void 
analyze(const Event& event) { // Get B+ Mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BPLUS)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BPLUS)) { if (isSemileptonicDecay(p, {PID::OMEGA, PID::POSITRON, PID::NU_E}) || isSemileptonicDecay(p, {PID::OMEGA, PID::ANTIMUON, PID::NU_MU})) { _h_q2->fill(q2(p), event.weight()); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_q2, 1.21); // normalize to BF } //@} private: /// @name Histograms //@{ Histo1DPtr _h_q2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2013_I1116411); } diff --git a/analyses/pluginMisc/BABAR_2015_I1334693.cc b/analyses/pluginMisc/BABAR_2015_I1334693.cc --- a/analyses/pluginMisc/BABAR_2015_I1334693.cc +++ b/analyses/pluginMisc/BABAR_2015_I1334693.cc @@ -1,83 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BABAR_2015_I1334693 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BABAR_2015_I1334693); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_q2 = bookHisto1D(1, 1, 1); } // Calculate the Q2 using mother and daugher meson double q2(const Particle& B, int mesonID) { FourMomentum q = B.mom() - filter_select(B.children(), Cuts::pid==mesonID)[0]; return q*q; } // Check for explicit decay into pdgids bool isSemileptonicDecay(const Particle& mother, vector ids) { // Trivial check to ignore any other decays but the one in question modulo photons const Particles children = mother.children(Cuts::pid!=PID::PHOTON); if (children.size()!=ids.size()) return false; // Check for the explicit decay 
return all(ids, [&](int i){return count(children, hasPID(i))==1;}); } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over D0 mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::D0)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::D0)) { if (isSemileptonicDecay(p, {PID::PIMINUS, PID::POSITRON, PID::NU_E})) { _h_q2->fill(q2(p, PID::PIMINUS), event.weight()); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_q2, 375.4); // normalize to data } //@} private: /// @name Histograms //@{ Histo1DPtr _h_q2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BABAR_2015_I1334693); } diff --git a/analyses/pluginMisc/BELLE_2001_S4598261.cc b/analyses/pluginMisc/BELLE_2001_S4598261.cc --- a/analyses/pluginMisc/BELLE_2001_S4598261.cc +++ b/analyses/pluginMisc/BELLE_2001_S4598261.cc @@ -1,106 +1,106 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BELLE pi0 spectrum at Upsilon(4S) /// @author Peter Richardson class BELLE_2001_S4598261 : public Analysis { public: BELLE_2001_S4598261() : Analysis("BELLE_2001_S4598261"), _weightSum(0.) 
{ } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _histdSigDp = bookHisto1D(1, 1, 1); // spectrum _histMult = bookHisto1D(2, 1, 1); // multiplicity } void analyze(const Event& e) { const double weight = e.weight(); // Find the upsilons Particles upsilons; // First in unstable final state - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) if (p.pid()==300553) upsilons.push_back(p); // Then in whole event if fails if (upsilons.empty()) { foreach (const GenParticle* p, Rivet::particles(e.genEvent())) { if (p->pdg_id() != 300553) continue; const GenVertex* pv = p->production_vertex(); bool passed = true; if (pv) { /// @todo Use better looping for (GenVertex::particles_in_const_iterator pp = pv->particles_in_const_begin() ; pp != pv->particles_in_const_end() ; ++pp) { if ( p->pdg_id() == (*pp)->pdg_id() ) { passed = false; break; } } } if (passed) upsilons.push_back(Particle(p)); } } // Find upsilons foreach (const Particle& p, upsilons) { _weightSum += weight; // Find the neutral pions from the decay vector pions; findDecayProducts(p.genParticle(), pions); const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec()); for (size_t ix=0; ixmomentum())).p(); _histdSigDp->fill(pcm,weight); } _histMult->fill(0., pions.size()*weight); } } void finalize() { scale(_histdSigDp, 1./_weightSum); scale(_histMult , 1./_weightSum); } private: //@{ // count of weights double _weightSum; /// Histograms Histo1DPtr _histdSigDp; Histo1DPtr _histMult; //@} void findDecayProducts(const GenParticle* p, vector& pions) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { const int id = (*pp)->pdg_id(); if (id == 111) { pions.push_back(*pp); } else if ((*pp)->end_vertex()) 
findDecayProducts(*pp, pions); } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2001_S4598261); } diff --git a/analyses/pluginMisc/BELLE_2008_I786560.cc b/analyses/pluginMisc/BELLE_2008_I786560.cc --- a/analyses/pluginMisc/BELLE_2008_I786560.cc +++ b/analyses/pluginMisc/BELLE_2008_I786560.cc @@ -1,112 +1,112 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief BELLE tau lepton to pi pi /// @author Peter Richardson class BELLE_2008_I786560 : public Analysis { public: BELLE_2008_I786560() : Analysis("BELLE_2008_I786560"), _weight_total(0), _weight_pipi(0) { } void init() { - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); _hist_pipi = bookHisto1D( 1, 1, 1); } void analyze(const Event& e) { // Find the taus Particles taus; - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); foreach (const Particle& p, ufs.particles()) { if (p.abspid() != PID::TAU) continue; _weight_total += 1.; Particles pip, pim, pi0; unsigned int nstable = 0; // get the boost to the rest frame LorentzTransform cms_boost; if (p.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec()); // find the decay products we want findDecayProducts(p.genParticle(), nstable, pip, pim, pi0); if (p.pid() < 0) { swap(pip, pim); } if (nstable != 3) continue; // pipi if (pim.size() == 1 && pi0.size() == 1) { _weight_pipi += 1.; _hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass2(),1.); } } } void finalize() { if (_weight_pipi > 0.) 
scale(_hist_pipi, 1./_weight_pipi); } private: //@{ // Histograms Histo1DPtr _hist_pipi; // Weights counters double _weight_total, _weight_pipi; //@} void findDecayProducts(const GenParticle* p, unsigned int & nstable, Particles& pip, Particles& pim, Particles& pi0) { const GenVertex* dv = p->end_vertex(); /// @todo Use better looping for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) { int id = (*pp)->pdg_id(); if (id == PID::PI0 ) { pi0.push_back(Particle(**pp)); ++nstable; } else if (id == PID::K0S) ++nstable; else if (id == PID::PIPLUS) { pip.push_back(Particle(**pp)); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(Particle(**pp)); ++nstable; } else if (id == PID::KPLUS) { ++nstable; } else if (id == PID::KMINUS) { ++nstable; } else if ((*pp)->end_vertex()) { findDecayProducts(*pp, nstable, pip, pim, pi0); } else ++nstable; } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2008_I786560); } diff --git a/analyses/pluginMisc/BELLE_2011_I878990.cc b/analyses/pluginMisc/BELLE_2011_I878990.cc --- a/analyses/pluginMisc/BELLE_2011_I878990.cc +++ b/analyses/pluginMisc/BELLE_2011_I878990.cc @@ -1,82 +1,82 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BELLE_2011_I878990 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BELLE_2011_I878990); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_q2 = bookHisto1D(1, 1, 1); } // Calculate the Q2 using mother and daugher meson double q2(const Particle& B, int mesonID) { FourMomentum q = B.mom() - filter_select(B.children(), 
Cuts::pid==mesonID)[0]; return q*q; } // Check for explicit decay into pdgids bool isSemileptonicDecay(const Particle& mother, vector ids) { // Trivial check to ignore any other decays but the one in question modulo photons const Particles children = mother.children(Cuts::pid!=PID::PHOTON); if (children.size()!=ids.size()) return false; // Check for the explicit decay return all(ids, [&](int i){return count(children, hasPID(i))==1;}); } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over B0 mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0)) { if (isSemileptonicDecay(p, {PID::PIMINUS, PID::POSITRON, PID::NU_E}) || isSemileptonicDecay(p, {PID::PIMINUS, PID::ANTIMUON, PID::NU_MU})) { _h_q2->fill(q2(p, PID::PIMINUS), event.weight()); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_q2, 3000.86); // normalize to BF*dQ2 } //@} private: /// @name Histograms //@{ Histo1DPtr _h_q2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2011_I878990); } diff --git a/analyses/pluginMisc/BELLE_2013_I1238273.cc b/analyses/pluginMisc/BELLE_2013_I1238273.cc --- a/analyses/pluginMisc/BELLE_2013_I1238273.cc +++ b/analyses/pluginMisc/BELLE_2013_I1238273.cc @@ -1,114 +1,114 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BELLE_2013_I1238273 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BELLE_2013_I1238273); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_q2_B0bar_pi = bookHisto1D(1, 1, 1); 
_h_q2_B0bar_rho = bookHisto1D(3, 1, 1); _h_q2_Bminus_pi = bookHisto1D(2, 1, 1); _h_q2_Bminus_rho = bookHisto1D(4, 1, 1); _h_q2_Bminus_omega = bookHisto1D(5, 1, 1); } // Calculate the Q2 using mother and daugher meson double q2(const Particle& B, int mesonID) { FourMomentum q = B.mom() - filter_select(B.children(), Cuts::pid==mesonID)[0]; return q*q; } // Check for explicit decay into pdgids bool isSemileptonicDecay(const Particle& mother, vector ids) { // Trivial check to ignore any other decays but the one in question modulo photons const Particles children = mother.children(Cuts::pid!=PID::PHOTON); if (children.size()!=ids.size()) return false; // Check for the explicit decay return all(ids, [&](int i){return count(children, hasPID(i))==1;}); } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over B0bar Mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0BAR)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0BAR)) { if (isSemileptonicDecay(p, {PID::PIPLUS, PID::ELECTRON, PID::NU_EBAR}) || isSemileptonicDecay(p, {PID::PIPLUS, PID::MUON, PID::NU_MUBAR})) { _h_q2_B0bar_pi->fill(q2(p, PID::PIPLUS), event.weight()); } if (isSemileptonicDecay(p, {PID::RHOPLUS, PID::ELECTRON, PID::NU_EBAR}) || isSemileptonicDecay(p, {PID::RHOPLUS, PID::MUON, PID::NU_MUBAR})) { _h_q2_B0bar_rho->fill(q2(p, PID::RHOPLUS), event.weight()); } } // Loop over B- Mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BMINUS)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BMINUS)) { if (isSemileptonicDecay(p, {PID::PI0, PID::ELECTRON, PID::NU_EBAR}) || isSemileptonicDecay(p, {PID::PI0, PID::MUON, PID::NU_MUBAR})) { _h_q2_Bminus_pi->fill(q2(p, PID::PI0), event.weight()); } if (isSemileptonicDecay(p, {PID::RHO0, PID::ELECTRON, PID::NU_EBAR}) || isSemileptonicDecay(p, {PID::RHO0, PID::MUON, PID::NU_MUBAR})) { _h_q2_Bminus_rho->fill(q2(p,PID::RHO0), 
event.weight()); } if (isSemileptonicDecay(p, {PID::OMEGA, PID::ELECTRON, PID::NU_EBAR}) || isSemileptonicDecay(p, {PID::OMEGA, PID::MUON, PID::NU_MUBAR})) { _h_q2_Bminus_omega->fill(q2(p, PID::OMEGA), event.weight()); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_q2_B0bar_pi , 298.8); // normalize to BF*dQ2 normalize(_h_q2_B0bar_rho , 1304.8); // normalize to BF*dQ2 normalize(_h_q2_Bminus_pi , 324.8); // normalize to BF*dQ2 normalize(_h_q2_Bminus_rho , 367.0); // normalize to BF*dQ2 normalize(_h_q2_Bminus_omega, 793.1); // normalize to BF*dQ2 } //@} private: /// @name Histograms //@{ Histo1DPtr _h_q2_B0bar_pi ; Histo1DPtr _h_q2_B0bar_rho ; Histo1DPtr _h_q2_Bminus_pi ; Histo1DPtr _h_q2_Bminus_rho ; Histo1DPtr _h_q2_Bminus_omega; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2013_I1238273); } diff --git a/analyses/pluginMisc/BELLE_2015_I1397632.cc b/analyses/pluginMisc/BELLE_2015_I1397632.cc --- a/analyses/pluginMisc/BELLE_2015_I1397632.cc +++ b/analyses/pluginMisc/BELLE_2015_I1397632.cc @@ -1,98 +1,98 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BELLE_2015_I1397632 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BELLE_2015_I1397632); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_B_Denu = bookHisto1D(1, 1, 1); _h_B_Dmunu = bookHisto1D(1, 1, 2); _h_B_Deplusnu = bookHisto1D(1, 1, 3); _h_B_Dmuplusnu = bookHisto1D(1, 1, 4); } // Check for explicit decay into pdgids bool isSemileptonicDecay(const Particle& mother, vector ids) { // Trivial check to ignore any other decays but the one in question modulo photons const 
Particles children = mother.children(Cuts::pid!=PID::PHOTON); if (children.size()!=ids.size()) return false; // Check for the explicit decay return all(ids, [&](int i){return count(children, hasPID(i))==1;}); } // Calculate the recoil w using mother and daugher meson double recoilW(const Particle& B, int mesonID) { // TODO why does that not work with const? Particle D = filter_select(B.children(), Cuts::pid==mesonID)[0]; FourMomentum q = B.mom() - D.mom(); return (B.mom()*B.mom() + D.mom()*D.mom() - q*q )/ (2. * sqrt(B.mom()*B.mom()) * sqrt(D.mom()*D.mom()) ); } /// Perform the per-event analysis void analyze(const Event& event) { // Get B0 Mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::B0)) { if (isSemileptonicDecay(p, {PID::DMINUS,PID::POSITRON,PID::NU_E})) _h_B_Denu->fill( recoilW(p, PID::DMINUS), event.weight()); if (isSemileptonicDecay(p, {PID::DMINUS,PID::ANTIMUON,PID::NU_MU})) _h_B_Dmunu->fill(recoilW(p, PID::DMINUS), event.weight()); } // Get B+ Mesons - foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BPLUS)) { + foreach(const Particle& p, apply(event, "UFS").particles(Cuts::pid==PID::BPLUS)) { if (isSemileptonicDecay(p, {PID::D0BAR,PID::POSITRON,PID::NU_E})) _h_B_Deplusnu->fill( recoilW(p, PID::D0BAR), event.weight()); if (isSemileptonicDecay(p, {PID::D0BAR,PID::ANTIMUON,PID::NU_MU})) _h_B_Dmuplusnu->fill(recoilW(p, PID::D0BAR), event.weight()); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_B_Denu); normalize(_h_B_Dmunu); normalize(_h_B_Deplusnu); normalize(_h_B_Dmuplusnu); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_B_Denu; Histo1DPtr _h_B_Dmunu; Histo1DPtr _h_B_Deplusnu; Histo1DPtr _h_B_Dmuplusnu; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2015_I1397632); } diff --git a/analyses/pluginMisc/BELLE_2017_I1512299.cc 
b/analyses/pluginMisc/BELLE_2017_I1512299.cc --- a/analyses/pluginMisc/BELLE_2017_I1512299.cc +++ b/analyses/pluginMisc/BELLE_2017_I1512299.cc @@ -1,166 +1,166 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BELLE_2017_I1512299 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BELLE_2017_I1512299); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // Book histograms _h_w = bookHisto1D(1, 1, 1); _h_costhv = bookHisto1D(2, 1, 1); _h_costhl = bookHisto1D(3, 1, 1); _h_chi = bookHisto1D(4, 1, 1); } /// Perform the per-event analysis bool analyzeDecay(Particle mother, vector ids) { // There is no point in looking for decays with less particles than to be analysed if (mother.children().size() == ids.size()) { bool decayfound = true; for (int id : ids) { if (!contains(mother, id)) decayfound = false; } return decayfound; } return false; } bool contains(Particle& mother, int id) { return any(mother.children(), HasPID(id)); } double recoilW(const Particle& mother) { FourMomentum lepton, neutrino, meson, q; foreach(const Particle& c, mother.children()) { if (c.isNeutrino()) neutrino=c.mom(); if (c.isLepton() &! c.isNeutrino()) lepton =c.mom(); if (c.isHadron()) meson=c.mom(); } q = lepton + neutrino; //no hadron before double mb2= mother.mom()*mother.mom(); double mD2 = meson*meson; return (mb2 + mD2 - q*q )/ (2. 
* sqrt(mb2) * sqrt(mD2) ); } /// Perform the per-event analysis void analyze(const Event& event) { FourMomentum pl, pnu, pB, pD, pDs, ppi; // Iterate of B0bar mesons - for(const Particle& p : apply(event, "UFS").particles(Cuts::pid==-511)) { + for(const Particle& p : apply(event, "UFS").particles(Cuts::pid==-511)) { pB = p.momentum(); // Find semileptonic decays if (analyzeDecay(p, {PID::DSTARPLUS,-12,11}) || analyzeDecay(p, {PID::DSTARPLUS,-14,13}) ) { _h_w->fill(recoilW(p), event.weight()); // Get the necessary momenta for the angles bool foundDdecay=false; for (const Particle c : p.children()) { if ( (c.pid() == PID::DSTARPLUS) && (analyzeDecay(c, {PID::PIPLUS, PID::D0}) || analyzeDecay(c, {PID::PI0, PID::DPLUS})) ) { foundDdecay=true; pDs = c.momentum(); for (const Particle dc : c.children()) { if (dc.hasCharm()) pD = dc.momentum(); else ppi = dc.momentum(); } } if (c.pid() == 11 || c.pid() == 13) pl = c.momentum(); if (c.pid() == -12 || c.pid() == -14) pnu = c.momentum(); } // This is the angle analysis if (foundDdecay) { // First boost all relevant momenta into the B-rest frame const LorentzTransform B_boost = LorentzTransform::mkFrameTransformFromBeta(pB.betaVec()); // Momenta in B rest frame: FourMomentum lv_brest_Dstar = B_boost.transform(pDs);//lab2brest(gp_Dstar.particle.p()); FourMomentum lv_brest_w = B_boost.transform(pB - pDs); //lab2brest(p_lv_w); FourMomentum lv_brest_D = B_boost.transform(pD); //lab2brest(gp_D.particle.p()); FourMomentum lv_brest_lep = B_boost.transform(pl); //lab2brest(gp_lep.p()); const LorentzTransform Ds_boost = LorentzTransform::mkFrameTransformFromBeta(pDs.betaVec()); FourMomentum lv_Dstarrest_D = Ds_boost.transform(lv_brest_D); const LorentzTransform W_boost = LorentzTransform::mkFrameTransformFromBeta((pB-pDs).betaVec()); FourMomentum lv_wrest_lep = W_boost.transform(lv_brest_lep); double cos_thetaV = cos(lv_brest_Dstar.p3().angle(lv_Dstarrest_D.p3())); _h_costhv->fill(cos_thetaV, event.weight()); double cos_thetaL = 
cos(lv_brest_w.p3().angle(lv_wrest_lep.p3())); _h_costhl->fill(cos_thetaL, event.weight()); Vector3 LTrans = lv_wrest_lep.p3() - cos_thetaL*lv_wrest_lep.p3().perp()*lv_brest_w.p3().unit(); Vector3 VTrans = lv_Dstarrest_D.p3() - cos_thetaV*lv_Dstarrest_D.p3().perp()*lv_brest_Dstar.p3().unit(); float chi = atan2(LTrans.cross(VTrans).dot(lv_brest_w.p3().unit()), LTrans.dot(VTrans)); if(chi < 0) chi += TWOPI; _h_chi->fill(chi, event.weight()); //const LorentzTransform W_boost = LorentzTransform::mkFrameTransformFromBeta((pl+pnu).betaVec()); //const LorentzTransform D_boost = LorentzTransform::mkFrameTransformFromBeta((pD+ppi).betaVec()); //FourMomentum pl_t = FourMomentum(W_boost.transform(pl)); //FourMomentum pD_t = FourMomentum(D_boost.transform(pD)); //double thetal = (pl+pnu).angle(pl_t); //double thetav = (pD+ppi).angle(pD_t); //_h_costhv->fill(cos(thetav), event.weight()); //_h_costhl->fill(cos(thetal), event.weight()); } } } } //else if (analyzeDecay(p, {413,-14,13}) ) { //_h_w->fill(recoilW(p), event.weight()); //} /// Normalise histograms etc., after the run void finalize() { double GAMMA_B0 = 4.32e-13; // Total width in GeV, calculated from mean life time of 1.52 pico seconds double BR_B0_DSPLUS_ELL_NU = 0.0495; // Branching fraction from the same paper for B0bar to D*+ ell nu double NORM = GAMMA_B0 * BR_B0_DSPLUS_ELL_NU; // Normalise histos to partial width normalize(_h_w, NORM); normalize(_h_costhv, NORM); normalize(_h_costhl, NORM); normalize(_h_chi, NORM); } //@} /// @name Histograms //@{ Histo1DPtr _h_w; Histo1DPtr _h_costhv; Histo1DPtr _h_costhl; Histo1DPtr _h_chi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BELLE_2017_I1512299); } diff --git a/analyses/pluginMisc/CLEO_2004_S5809304.cc b/analyses/pluginMisc/CLEO_2004_S5809304.cc --- a/analyses/pluginMisc/CLEO_2004_S5809304.cc +++ b/analyses/pluginMisc/CLEO_2004_S5809304.cc @@ -1,165 +1,165 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" -#include 
"Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief CLEO charmed mesons and baryons from fragmentation /// @author Peter Richardson class CLEO_2004_S5809304 : public Analysis { public: DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_2004_S5809304); void init() { declare(Beam(), "Beams"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); // continuum cross sections _sigmaDPlus = bookHisto1D(1,1,1); _sigmaD0A = bookHisto1D(1,1,2); _sigmaD0B = bookHisto1D(1,1,3); _sigmaDStarPlusA = bookHisto1D(1,1,4); _sigmaDStarPlusB = bookHisto1D(1,1,5); _sigmaDStar0A = bookHisto1D(1,1,6); _sigmaDStar0B = bookHisto1D(1,1,7); // histograms for continuum data _histXpDplus = bookHisto1D(2, 1, 1); _histXpD0A = bookHisto1D(3, 1, 1); _histXpD0B = bookHisto1D(4, 1, 1); _histXpDStarPlusA = bookHisto1D(5, 1, 1); _histXpDStarPlusB = bookHisto1D(6, 1, 1); _histXpDStar0A = bookHisto1D(7, 1, 1); _histXpDStar0B = bookHisto1D(8, 1, 1); _histXpTotal = bookHisto1D(9, 1, 1); } void analyze(const Event& e) { const double weight = e.weight(); // Loop through unstable FS particles and look for charmed mesons/baryons - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); const Beam beamproj = apply(e, "Beams"); const ParticlePair& beams = beamproj.beams(); const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum(); LorentzTransform cms_boost; if (mom_tot.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec()); const double s = sqr(beamproj.sqrtS()); // Particle masses from PDGlive (accessed online 16. Nov. 2009). 
for (const Particle& p : ufs.particles()) { double xp = 0.0; double mH2 = 0.0; // 3-momentum in CMS frame const double mom = cms_boost.transform(p.momentum()).vector3().mod(); const int pdgid = p.abspid(); MSG_DEBUG("pdgID = " << pdgid << " mom = " << mom); switch (pdgid) { case 421: MSG_DEBUG("D0 found"); mH2 = 3.47763; // 1.86484^2 xp = mom/sqrt(s/4.0 - mH2); _sigmaD0A->fill(10.6,weight); _sigmaD0B->fill(10.6,weight); _histXpD0A->fill(xp, weight); _histXpD0B->fill(xp, weight); _histXpTotal->fill(xp, weight); break; case 411: MSG_DEBUG("D+ found"); mH2 = 3.49547; // 1.86962^2 xp = mom/sqrt(s/4.0 - mH2); _sigmaDPlus->fill(10.6,weight); _histXpDplus->fill(xp, weight); _histXpTotal->fill(xp, weight); break; case 413: MSG_DEBUG("D*+ found"); mH2 = 4.04119; // 2.01027^2 xp = mom/sqrt(s/4.0 - mH2); _sigmaDStarPlusA->fill(10.6,weight); _sigmaDStarPlusB->fill(10.6,weight); _histXpDStarPlusA->fill(xp, weight); _histXpDStarPlusB->fill(xp, weight); _histXpTotal->fill(xp, weight); break; case 423: MSG_DEBUG("D*0 found"); mH2 = 4.02793; // 2.00697**2 xp = mom/sqrt(s/4.0 - mH2); _sigmaDStar0A->fill(10.6,weight); _sigmaDStar0B->fill(10.6,weight); _histXpDStar0A->fill(xp, weight); _histXpDStar0B->fill(xp, weight); _histXpTotal->fill(xp, weight); break; } } } void finalize() { scale(_sigmaDPlus , crossSection()/picobarn/sumOfWeights()); scale(_sigmaD0A , crossSection()/picobarn/sumOfWeights()); scale(_sigmaD0B , crossSection()/picobarn/sumOfWeights()); scale(_sigmaDStarPlusA, crossSection()/picobarn/sumOfWeights()); scale(_sigmaDStarPlusB, crossSection()/picobarn/sumOfWeights()); scale(_sigmaDStar0A , crossSection()/picobarn/sumOfWeights()); scale(_sigmaDStar0B , crossSection()/picobarn/sumOfWeights()); scale(_histXpDplus , crossSection()/picobarn/sumOfWeights()); scale(_histXpD0A , crossSection()/picobarn/sumOfWeights()); scale(_histXpD0B , crossSection()/picobarn/sumOfWeights()); scale(_histXpDStarPlusA, crossSection()/picobarn/sumOfWeights()); scale(_histXpDStarPlusB, 
crossSection()/picobarn/sumOfWeights()); scale(_histXpDStar0A , crossSection()/picobarn/sumOfWeights()); scale(_histXpDStar0B , crossSection()/picobarn/sumOfWeights()); scale(_histXpTotal , crossSection()/picobarn/sumOfWeights()/4.); } private: //@{ // Histograms for the continuum cross sections Histo1DPtr _sigmaDPlus ; Histo1DPtr _sigmaD0A ; Histo1DPtr _sigmaD0B ; Histo1DPtr _sigmaDStarPlusA; Histo1DPtr _sigmaDStarPlusB; Histo1DPtr _sigmaDStar0A ; Histo1DPtr _sigmaDStar0B ; // histograms for continuum data Histo1DPtr _histXpDplus ; Histo1DPtr _histXpD0A ; Histo1DPtr _histXpD0B ; Histo1DPtr _histXpDStarPlusA; Histo1DPtr _histXpDStarPlusB; Histo1DPtr _histXpDStar0A ; Histo1DPtr _histXpDStar0B ; Histo1DPtr _histXpTotal ; //@} }; DECLARE_RIVET_PLUGIN(CLEO_2004_S5809304); } diff --git a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc --- a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc +++ b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc @@ -1,770 +1,770 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Implementation of PDG hadron multiplicities /// @author Hendrik Hoeth class PDG_HADRON_MULTIPLICITIES : public Analysis { public: /// Constructor PDG_HADRON_MULTIPLICITIES() : Analysis("PDG_HADRON_MULTIPLICITIES") { } /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); MSG_DEBUG("sqrt(s) = " << sqrtS()/GeV << " GeV"); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight); break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 223: _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), 
weight); break; case 423: _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight); break; case 433: _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight); break; case 443: _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight); break; case 3312: _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 2224: _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3324: _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; case 4222: case 4112: _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid(), weight); break; case 3124: _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: 
_histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight); break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight); break; case 423: _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 325: _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid(), weight); break; case 315: _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3312: 
_histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight); break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 511: _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight); break; case 521: _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight); _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid(), weight); break; case 531: _histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid(), weight); break; case 
9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 9000211: _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 213: _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid(), weight); break; case 223: _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight); break; case 433: _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight); break; case 513: case 523: case 533: _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid(), weight); break; case 443: _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight); break; case 100443: _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid(), weight); break; case 553: _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid(), weight); break; case 20223: _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid(), weight); break; case 20333: _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid(), weight); break; case 445: _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 335: _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid(), weight); break; case 315: _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight); break; case 515: case 525: case 535: 
_histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid(), weight); break; case 10433: case 20433: _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid(), weight); break; case 435: _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight); break; case 3112: _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid(), weight); _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight); break; case 3222: _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid(), weight); _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight); break; case 3312: _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 2224: _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3324: _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; case 5122: _histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid(), weight); break; case 3124: 
_histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; } } } } void init() { declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { _histMeanMultiPiPlus = bookHisto1D( 1, 1, 1); _histMeanMultiPi0 = bookHisto1D( 2, 1, 1); _histMeanMultiKPlus = bookHisto1D( 3, 1, 1); _histMeanMultiK0 = bookHisto1D( 4, 1, 1); _histMeanMultiEta = bookHisto1D( 5, 1, 1); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 1); _histMeanMultiDPlus = bookHisto1D( 7, 1, 1); _histMeanMultiD0 = bookHisto1D( 8, 1, 1); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 1); _histMeanMultiF0_980 = bookHisto1D(13, 1, 1); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 1); _histMeanMultiOmega782 = bookHisto1D(17, 1, 1); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 1); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 1); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 1); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 1); _histMeanMultiDStar2007_0 = bookHisto1D(22, 1, 1); _histMeanMultiDStar_s2112Plus = bookHisto1D(23, 1, 1); _histMeanMultiJPsi1S = bookHisto1D(25, 1, 1); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 1); _histMeanMultiLambda = bookHisto1D(39, 1, 1); _histMeanMultiSigma0 = bookHisto1D(40, 1, 1); _histMeanMultiXiMinus = bookHisto1D(44, 1, 1); 
_histMeanMultiDelta1232PlusPlus = bookHisto1D(45, 1, 1); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 1); _histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 1); _histMeanMultiSigma1385PlusMinus = bookHisto1D(48, 1, 1); _histMeanMultiXi1530_0 = bookHisto1D(49, 1, 1); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 1); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 1); _histMeanMultiSigma_c_PlusPlus_0 = bookHisto1D(53, 1, 1); _histMeanMultiLambda1520 = bookHisto1D(54, 1, 1); } if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) { _histMeanMultiPiPlus = bookHisto1D( 1, 1, 2); _histMeanMultiPi0 = bookHisto1D( 2, 1, 2); _histMeanMultiKPlus = bookHisto1D( 3, 1, 2); _histMeanMultiK0 = bookHisto1D( 4, 1, 2); _histMeanMultiEta = bookHisto1D( 5, 1, 2); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 2); _histMeanMultiDPlus = bookHisto1D( 7, 1, 2); _histMeanMultiD0 = bookHisto1D( 8, 1, 2); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 2); _histMeanMultiF0_980 = bookHisto1D(13, 1, 2); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 2); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 2); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 2); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 2); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 2); _histMeanMultiDStar2007_0 = bookHisto1D(22, 1, 2); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 2); _histMeanMultiK2Star1430Plus = bookHisto1D(33, 1, 1); _histMeanMultiK2Star1430_0 = bookHisto1D(34, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 2); _histMeanMultiLambda = bookHisto1D(39, 1, 2); _histMeanMultiXiMinus = bookHisto1D(44, 1, 2); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 2); _histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 2); _histMeanMultiSigma1385PlusMinus = bookHisto1D(48, 1, 2); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 2); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 2); } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { _histMeanMultiPiPlus = bookHisto1D( 1, 1, 3); _histMeanMultiPi0 = bookHisto1D( 2, 1, 3); _histMeanMultiKPlus = 
bookHisto1D( 3, 1, 3); _histMeanMultiK0 = bookHisto1D( 4, 1, 3); _histMeanMultiEta = bookHisto1D( 5, 1, 3); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 3); _histMeanMultiDPlus = bookHisto1D( 7, 1, 3); _histMeanMultiD0 = bookHisto1D( 8, 1, 3); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 3); _histMeanMultiBPlus_B0_d = bookHisto1D(10, 1, 1); _histMeanMultiBPlus_u = bookHisto1D(11, 1, 1); _histMeanMultiB0_s = bookHisto1D(12, 1, 1); _histMeanMultiF0_980 = bookHisto1D(13, 1, 3); _histMeanMultiA0_980Plus = bookHisto1D(14, 1, 1); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 3); _histMeanMultiRho770Plus = bookHisto1D(16, 1, 1); _histMeanMultiOmega782 = bookHisto1D(17, 1, 2); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 3); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 3); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 3); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 3); _histMeanMultiDStar_s2112Plus = bookHisto1D(23, 1, 2); _histMeanMultiBStar = bookHisto1D(24, 1, 1); _histMeanMultiJPsi1S = bookHisto1D(25, 1, 2); _histMeanMultiPsi2S = bookHisto1D(26, 1, 1); _histMeanMultiUpsilon1S = bookHisto1D(27, 1, 1); _histMeanMultiF1_1285 = bookHisto1D(28, 1, 1); _histMeanMultiF1_1420 = bookHisto1D(29, 1, 1); _histMeanMultiChi_c1_3510 = bookHisto1D(30, 1, 1); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 3); _histMeanMultiF2Prime1525 = bookHisto1D(32, 1, 1); _histMeanMultiK2Star1430_0 = bookHisto1D(34, 1, 2); _histMeanMultiBStarStar = bookHisto1D(35, 1, 1); _histMeanMultiDs1Plus = bookHisto1D(36, 1, 1); _histMeanMultiDs2Plus = bookHisto1D(37, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 3); _histMeanMultiLambda = bookHisto1D(39, 1, 3); _histMeanMultiSigma0 = bookHisto1D(40, 1, 2); _histMeanMultiSigmaMinus = bookHisto1D(41, 1, 1); _histMeanMultiSigmaPlus = bookHisto1D(42, 1, 1); _histMeanMultiSigmaPlusMinus = bookHisto1D(43, 1, 1); _histMeanMultiXiMinus = bookHisto1D(44, 1, 3); _histMeanMultiDelta1232PlusPlus = bookHisto1D(45, 1, 2); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 3); 
_histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 3); _histMeanMultiSigma1385PlusMinus = bookHisto1D(48, 1, 3); _histMeanMultiXi1530_0 = bookHisto1D(49, 1, 2); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 3); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 3); _histMeanMultiLambda_b_0 = bookHisto1D(52, 1, 1); _histMeanMultiLambda1520 = bookHisto1D(54, 1, 2); } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { _histMeanMultiPiPlus = bookHisto1D( 1, 1, 4); _histMeanMultiKPlus = bookHisto1D( 3, 1, 4); _histMeanMultiK0 = bookHisto1D( 4, 1, 4); _histMeanMultiP = bookHisto1D(38, 1, 4); _histMeanMultiLambda = bookHisto1D(39, 1, 4); } } // Finalize void finalize() { if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { scale(_histMeanMultiPiPlus , 1.0/sumOfWeights()); scale(_histMeanMultiPi0 , 1.0/sumOfWeights()); scale(_histMeanMultiKPlus , 1.0/sumOfWeights()); scale(_histMeanMultiK0 , 1.0/sumOfWeights()); scale(_histMeanMultiEta , 1.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights()); scale(_histMeanMultiDPlus , 1.0/sumOfWeights()); scale(_histMeanMultiD0 , 1.0/sumOfWeights()); scale(_histMeanMultiDPlus_s , 1.0/sumOfWeights()); scale(_histMeanMultiF0_980 , 1.0/sumOfWeights()); scale(_histMeanMultiRho770_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmega782 , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights()); scale(_histMeanMultiPhi1020 , 1.0/sumOfWeights()); scale(_histMeanMultiDStar2010Plus , 1.0/sumOfWeights()); scale(_histMeanMultiDStar2007_0 , 1.0/sumOfWeights()); scale(_histMeanMultiDStar_s2112Plus , 1.0/sumOfWeights()); scale(_histMeanMultiJPsi1S , 1.0/sumOfWeights()); scale(_histMeanMultiF2_1270 , 1.0/sumOfWeights()); scale(_histMeanMultiP , 1.0/sumOfWeights()); scale(_histMeanMultiLambda , 1.0/sumOfWeights()); scale(_histMeanMultiSigma0 , 1.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 1.0/sumOfWeights()); scale(_histMeanMultiDelta1232PlusPlus , 1.0/sumOfWeights()); 
scale(_histMeanMultiSigma1385Minus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385PlusMinus, 1.0/sumOfWeights()); scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmegaMinus , 1.0/sumOfWeights()); scale(_histMeanMultiLambda_c_Plus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0/sumOfWeights()); scale(_histMeanMultiLambda1520 , 1.0/sumOfWeights()); } if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) { scale(_histMeanMultiPiPlus , 5.0/sumOfWeights()); scale(_histMeanMultiPi0 , 5.0/sumOfWeights()); scale(_histMeanMultiKPlus , 5.0/sumOfWeights()); scale(_histMeanMultiK0 , 5.0/sumOfWeights()); scale(_histMeanMultiEta , 5.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 5.0/sumOfWeights()); scale(_histMeanMultiDPlus , 5.0/sumOfWeights()); scale(_histMeanMultiD0 , 5.0/sumOfWeights()); scale(_histMeanMultiDPlus_s , 5.0/sumOfWeights()); scale(_histMeanMultiF0_980 , 5.0/sumOfWeights()); scale(_histMeanMultiRho770_0 , 5.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 5.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 5.0/sumOfWeights()); scale(_histMeanMultiPhi1020 , 5.0/sumOfWeights()); scale(_histMeanMultiDStar2010Plus , 5.0/sumOfWeights()); scale(_histMeanMultiDStar2007_0 , 5.0/sumOfWeights()); scale(_histMeanMultiF2_1270 , 5.0/sumOfWeights()); scale(_histMeanMultiK2Star1430Plus , 5.0/sumOfWeights()); scale(_histMeanMultiK2Star1430_0 , 5.0/sumOfWeights()); scale(_histMeanMultiP , 5.0/sumOfWeights()); scale(_histMeanMultiLambda , 5.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 5.0/sumOfWeights()); scale(_histMeanMultiSigma1385Minus , 5.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus , 5.0/sumOfWeights()); scale(_histMeanMultiSigma1385PlusMinus, 5.0/sumOfWeights()); scale(_histMeanMultiOmegaMinus , 5.0/sumOfWeights()); scale(_histMeanMultiLambda_c_Plus , 5.0/sumOfWeights()); } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { scale(_histMeanMultiPiPlus , 
1.0/sumOfWeights()); scale(_histMeanMultiPi0 , 1.0/sumOfWeights()); scale(_histMeanMultiKPlus , 1.0/sumOfWeights()); scale(_histMeanMultiK0 , 1.0/sumOfWeights()); scale(_histMeanMultiEta , 1.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights()); scale(_histMeanMultiDPlus , 1.0/sumOfWeights()); scale(_histMeanMultiD0 , 1.0/sumOfWeights()); scale(_histMeanMultiDPlus_s , 1.0/sumOfWeights()); scale(_histMeanMultiBPlus_B0_d , 1.0/sumOfWeights()); scale(_histMeanMultiBPlus_u , 1.0/sumOfWeights()); scale(_histMeanMultiB0_s , 1.0/sumOfWeights()); scale(_histMeanMultiF0_980 , 1.0/sumOfWeights()); scale(_histMeanMultiA0_980Plus , 1.0/sumOfWeights()); scale(_histMeanMultiRho770_0 , 1.0/sumOfWeights()); scale(_histMeanMultiRho770Plus , 1.0/sumOfWeights()); scale(_histMeanMultiOmega782 , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights()); scale(_histMeanMultiPhi1020 , 1.0/sumOfWeights()); scale(_histMeanMultiDStar2010Plus , 1.0/sumOfWeights()); scale(_histMeanMultiDStar_s2112Plus , 1.0/sumOfWeights()); scale(_histMeanMultiBStar , 1.0/sumOfWeights()); scale(_histMeanMultiJPsi1S , 1.0/sumOfWeights()); scale(_histMeanMultiPsi2S , 1.0/sumOfWeights()); scale(_histMeanMultiUpsilon1S , 1.0/sumOfWeights()); scale(_histMeanMultiF1_1285 , 1.0/sumOfWeights()); scale(_histMeanMultiF1_1420 , 1.0/sumOfWeights()); scale(_histMeanMultiChi_c1_3510 , 1.0/sumOfWeights()); scale(_histMeanMultiF2_1270 , 1.0/sumOfWeights()); scale(_histMeanMultiF2Prime1525 , 1.0/sumOfWeights()); scale(_histMeanMultiK2Star1430_0 , 1.0/sumOfWeights()); scale(_histMeanMultiBStarStar , 1.0/sumOfWeights()); scale(_histMeanMultiDs1Plus , 1.0/sumOfWeights()); scale(_histMeanMultiDs2Plus , 1.0/sumOfWeights()); scale(_histMeanMultiP , 1.0/sumOfWeights()); scale(_histMeanMultiLambda , 1.0/sumOfWeights()); scale(_histMeanMultiSigma0 , 1.0/sumOfWeights()); scale(_histMeanMultiSigmaMinus , 1.0/sumOfWeights()); 
scale(_histMeanMultiSigmaPlus , 1.0/sumOfWeights()); scale(_histMeanMultiSigmaPlusMinus , 1.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 1.0/sumOfWeights()); scale(_histMeanMultiDelta1232PlusPlus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Minus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385PlusMinus, 1.0/sumOfWeights()); scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmegaMinus , 1.0/sumOfWeights()); scale(_histMeanMultiLambda_c_Plus , 1.0/sumOfWeights()); scale(_histMeanMultiLambda_b_0 , 1.0/sumOfWeights()); scale(_histMeanMultiLambda1520 , 1.0/sumOfWeights()); } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { scale(_histMeanMultiPiPlus , 70.0/sumOfWeights()); scale(_histMeanMultiKPlus , 70.0/sumOfWeights()); scale(_histMeanMultiK0 , 70.0/sumOfWeights()); scale(_histMeanMultiP , 70.0/sumOfWeights()); scale(_histMeanMultiLambda , 70.0/sumOfWeights()); } } //@} private: Histo1DPtr _histMeanMultiPiPlus; Histo1DPtr _histMeanMultiPi0; Histo1DPtr _histMeanMultiKPlus; Histo1DPtr _histMeanMultiK0; Histo1DPtr _histMeanMultiEta; Histo1DPtr _histMeanMultiEtaPrime; Histo1DPtr _histMeanMultiDPlus; Histo1DPtr _histMeanMultiD0; Histo1DPtr _histMeanMultiDPlus_s; Histo1DPtr _histMeanMultiBPlus_B0_d; Histo1DPtr _histMeanMultiBPlus_u; Histo1DPtr _histMeanMultiB0_s; Histo1DPtr _histMeanMultiF0_980; Histo1DPtr _histMeanMultiA0_980Plus; Histo1DPtr _histMeanMultiRho770_0; Histo1DPtr _histMeanMultiRho770Plus; Histo1DPtr _histMeanMultiOmega782; Histo1DPtr _histMeanMultiKStar892Plus; Histo1DPtr _histMeanMultiKStar892_0; Histo1DPtr _histMeanMultiPhi1020; Histo1DPtr _histMeanMultiDStar2010Plus; Histo1DPtr _histMeanMultiDStar2007_0; Histo1DPtr _histMeanMultiDStar_s2112Plus; Histo1DPtr _histMeanMultiBStar; Histo1DPtr _histMeanMultiJPsi1S; Histo1DPtr _histMeanMultiPsi2S; Histo1DPtr _histMeanMultiUpsilon1S; Histo1DPtr _histMeanMultiF1_1285; Histo1DPtr _histMeanMultiF1_1420; Histo1DPtr 
_histMeanMultiChi_c1_3510; Histo1DPtr _histMeanMultiF2_1270; Histo1DPtr _histMeanMultiF2Prime1525; Histo1DPtr _histMeanMultiK2Star1430Plus; Histo1DPtr _histMeanMultiK2Star1430_0; Histo1DPtr _histMeanMultiBStarStar; Histo1DPtr _histMeanMultiDs1Plus; Histo1DPtr _histMeanMultiDs2Plus; Histo1DPtr _histMeanMultiP; Histo1DPtr _histMeanMultiLambda; Histo1DPtr _histMeanMultiSigma0; Histo1DPtr _histMeanMultiSigmaMinus; Histo1DPtr _histMeanMultiSigmaPlus; Histo1DPtr _histMeanMultiSigmaPlusMinus; Histo1DPtr _histMeanMultiXiMinus; Histo1DPtr _histMeanMultiDelta1232PlusPlus; Histo1DPtr _histMeanMultiSigma1385Minus; Histo1DPtr _histMeanMultiSigma1385Plus; Histo1DPtr _histMeanMultiSigma1385PlusMinus; Histo1DPtr _histMeanMultiXi1530_0; Histo1DPtr _histMeanMultiOmegaMinus; Histo1DPtr _histMeanMultiLambda_c_Plus; Histo1DPtr _histMeanMultiLambda_b_0; Histo1DPtr _histMeanMultiSigma_c_PlusPlus_0; Histo1DPtr _histMeanMultiLambda1520; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(PDG_HADRON_MULTIPLICITIES); } diff --git a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc --- a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc +++ b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc @@ -1,764 +1,764 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Implementation of PDG hadron multiplicities as ratios to \f$ \pi^\pm \f$ multiplicity /// @author Holger Schulz class PDG_HADRON_MULTIPLICITIES_RATIOS : public Analysis { public: /// Constructor PDG_HADRON_MULTIPLICITIES_RATIOS() : Analysis("PDG_HADRON_MULTIPLICITIES_RATIOS") { _weightedTotalNumPiPlus = 0; } /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged 
FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get event weight for histo filling const double weight = e.weight(); MSG_DEBUG("sqrt(S) = " << sqrtS()/GeV << " GeV"); // Final state of unstable particles to get particle spectra - const UnstableFinalState& ufs = apply(e, "UFS"); + const UnstableParticles& ufs = apply(e, "UFS"); if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _weightedTotalNumPiPlus += weight; break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 223: _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: 
_histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight); break; case 423: _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight); break; case 433: _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight); break; case 443: _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight); break; case 3312: _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 2224: _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3324: _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; case 4222: case 4112: _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid(), weight); break; case 3124: _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 
29 && sqrtS()/GeV <= 35) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _weightedTotalNumPiPlus += weight; break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight); break; case 423: _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 325: _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid(), weight); break; case 315: _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: 
_histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3312: _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _weightedTotalNumPiPlus += weight; break; case 111: _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight); break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 221: _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight); break; case 331: _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight); break; case 411: _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight); break; case 421: _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight); break; case 431: _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight); break; case 511: _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight); break; case 521: _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight); _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid(), weight); break; case 531: 
_histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid(), weight); break; case 9010221: _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight); break; case 9000211: _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid(), weight); break; case 113: _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight); break; case 213: _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid(), weight); break; case 223: _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight); break; case 323: _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight); break; case 313: _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight); break; case 333: _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight); break; case 413: _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight); break; case 433: _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight); break; case 513: case 523: case 533: _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid(), weight); break; case 443: _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight); break; case 100443: _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid(), weight); break; case 553: _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid(), weight); break; case 20223: _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid(), weight); break; case 20333: _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid(), weight); break; case 445: _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid(), weight); break; case 225: _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight); break; case 335: _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid(), weight); break; case 315: 
_histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight); break; case 515: case 525: case 535: _histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid(), weight); break; case 10433: case 20433: _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid(), weight); break; case 435: _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; case 3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight); break; case 3112: _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid(), weight); _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight); break; case 3222: _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid(), weight); _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight); break; case 3312: _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight); break; case 2224: _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight); break; case 3114: _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3224: _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight); _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight); break; case 3324: _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight); break; case 3334: _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight); break; case 4122: _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight); break; case 5122: 
_histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid(), weight); break; case 3124: _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight); break; } } } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { foreach (const Particle& p, ufs.particles()) { const PdgId id = p.abspid(); switch (id) { case 211: _weightedTotalNumPiPlus += weight; break; case 321: _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight); break; case 130: case 310: _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight); break; case 2212: _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight); break; case 3122: _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight); break; } } } } void init() { declare(ChargedFinalState(), "FS"); - declare(UnstableFinalState(), "UFS"); + declare(UnstableParticles(), "UFS"); if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { _histMeanMultiPi0 = bookHisto1D( 2, 1, 1); _histMeanMultiKPlus = bookHisto1D( 3, 1, 1); _histMeanMultiK0 = bookHisto1D( 4, 1, 1); _histMeanMultiEta = bookHisto1D( 5, 1, 1); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 1); _histMeanMultiDPlus = bookHisto1D( 7, 1, 1); _histMeanMultiD0 = bookHisto1D( 8, 1, 1); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 1); _histMeanMultiF0_980 = bookHisto1D(13, 1, 1); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 1); _histMeanMultiOmega782 = bookHisto1D(17, 1, 1); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 1); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 1); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 1); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 1); _histMeanMultiDStar2007_0 = bookHisto1D(22, 1, 1); _histMeanMultiDStar_s2112Plus = bookHisto1D(23, 1, 1); _histMeanMultiJPsi1S = bookHisto1D(25, 1, 1); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 1); _histMeanMultiLambda = bookHisto1D(39, 1, 1); _histMeanMultiSigma0 = bookHisto1D(40, 1, 1); _histMeanMultiXiMinus = bookHisto1D(44, 1, 1); 
_histMeanMultiDelta1232PlusPlus = bookHisto1D(45, 1, 1); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 1); _histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 1); _histMeanMultiSigma1385PlusMinus = bookHisto1D(48, 1, 1); _histMeanMultiXi1530_0 = bookHisto1D(49, 1, 1); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 1); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 1); _histMeanMultiSigma_c_PlusPlus_0 = bookHisto1D(53, 1, 1); _histMeanMultiLambda1520 = bookHisto1D(54, 1, 1); } if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) { _histMeanMultiPi0 = bookHisto1D( 2, 1, 2); _histMeanMultiKPlus = bookHisto1D( 3, 1, 2); _histMeanMultiK0 = bookHisto1D( 4, 1, 2); _histMeanMultiEta = bookHisto1D( 5, 1, 2); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 2); _histMeanMultiDPlus = bookHisto1D( 7, 1, 2); _histMeanMultiD0 = bookHisto1D( 8, 1, 2); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 2); _histMeanMultiF0_980 = bookHisto1D(13, 1, 2); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 2); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 2); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 2); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 2); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 2); _histMeanMultiDStar2007_0 = bookHisto1D(22, 1, 2); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 2); _histMeanMultiK2Star1430Plus = bookHisto1D(33, 1, 1); _histMeanMultiK2Star1430_0 = bookHisto1D(34, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 2); _histMeanMultiLambda = bookHisto1D(39, 1, 2); _histMeanMultiXiMinus = bookHisto1D(44, 1, 2); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 2); _histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 2); _histMeanMultiSigma1385PlusMinus = bookHisto1D(48, 1, 2); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 2); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 2); } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { _histMeanMultiPi0 = bookHisto1D( 2, 1, 3); _histMeanMultiKPlus = bookHisto1D( 3, 1, 3); _histMeanMultiK0 = bookHisto1D( 4, 1, 3); _histMeanMultiEta = 
bookHisto1D( 5, 1, 3); _histMeanMultiEtaPrime = bookHisto1D( 6, 1, 3); _histMeanMultiDPlus = bookHisto1D( 7, 1, 3); _histMeanMultiD0 = bookHisto1D( 8, 1, 3); _histMeanMultiDPlus_s = bookHisto1D( 9, 1, 3); _histMeanMultiBPlus_B0_d = bookHisto1D(10, 1, 1); _histMeanMultiBPlus_u = bookHisto1D(11, 1, 1); _histMeanMultiB0_s = bookHisto1D(12, 1, 1); _histMeanMultiF0_980 = bookHisto1D(13, 1, 3); _histMeanMultiA0_980Plus = bookHisto1D(14, 1, 1); _histMeanMultiRho770_0 = bookHisto1D(15, 1, 3); _histMeanMultiRho770Plus = bookHisto1D(16, 1, 1); _histMeanMultiOmega782 = bookHisto1D(17, 1, 2); _histMeanMultiKStar892Plus = bookHisto1D(18, 1, 3); _histMeanMultiKStar892_0 = bookHisto1D(19, 1, 3); _histMeanMultiPhi1020 = bookHisto1D(20, 1, 3); _histMeanMultiDStar2010Plus = bookHisto1D(21, 1, 3); _histMeanMultiDStar_s2112Plus = bookHisto1D(23, 1, 2); _histMeanMultiBStar = bookHisto1D(24, 1, 1); _histMeanMultiJPsi1S = bookHisto1D(25, 1, 2); _histMeanMultiPsi2S = bookHisto1D(26, 1, 1); _histMeanMultiUpsilon1S = bookHisto1D(27, 1, 1); _histMeanMultiF1_1285 = bookHisto1D(28, 1, 1); _histMeanMultiF1_1420 = bookHisto1D(29, 1, 1); _histMeanMultiChi_c1_3510 = bookHisto1D(30, 1, 1); _histMeanMultiF2_1270 = bookHisto1D(31, 1, 3); _histMeanMultiF2Prime1525 = bookHisto1D(32, 1, 1); _histMeanMultiK2Star1430_0 = bookHisto1D(34, 1, 2); _histMeanMultiBStarStar = bookHisto1D(35, 1, 1); _histMeanMultiDs1Plus = bookHisto1D(36, 1, 1); _histMeanMultiDs2Plus = bookHisto1D(37, 1, 1); _histMeanMultiP = bookHisto1D(38, 1, 3); _histMeanMultiLambda = bookHisto1D(39, 1, 3); _histMeanMultiSigma0 = bookHisto1D(40, 1, 2); _histMeanMultiSigmaMinus = bookHisto1D(41, 1, 1); _histMeanMultiSigmaPlus = bookHisto1D(42, 1, 1); _histMeanMultiSigmaPlusMinus = bookHisto1D(43, 1, 1); _histMeanMultiXiMinus = bookHisto1D(44, 1, 3); _histMeanMultiDelta1232PlusPlus = bookHisto1D(45, 1, 2); _histMeanMultiSigma1385Minus = bookHisto1D(46, 1, 3); _histMeanMultiSigma1385Plus = bookHisto1D(47, 1, 3); _histMeanMultiSigma1385PlusMinus = 
bookHisto1D(48, 1, 3); _histMeanMultiXi1530_0 = bookHisto1D(49, 1, 2); _histMeanMultiOmegaMinus = bookHisto1D(50, 1, 3); _histMeanMultiLambda_c_Plus = bookHisto1D(51, 1, 3); _histMeanMultiLambda_b_0 = bookHisto1D(52, 1, 1); _histMeanMultiLambda1520 = bookHisto1D(54, 1, 2); } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { _histMeanMultiKPlus = bookHisto1D( 3, 1, 4); _histMeanMultiK0 = bookHisto1D( 4, 1, 4); _histMeanMultiP = bookHisto1D(38, 1, 4); _histMeanMultiLambda = bookHisto1D(39, 1, 4); } } // Finalize void finalize() { if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) { scale(_histMeanMultiPi0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEta , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEtaPrime , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiD0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus_s , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF0_980 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiRho770_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiOmega782 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiPhi1020 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar2010Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar2007_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar_s2112Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiJPsi1S , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF2_1270 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiP , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiXiMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDelta1232PlusPlus , 
1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Minus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiXi1530_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiOmegaMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda_c_Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda1520 , 1.0/_weightedTotalNumPiPlus); } if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) { scale(_histMeanMultiPi0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKPlus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEta , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEtaPrime , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiD0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus_s , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF0_980 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiRho770_0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892Plus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892_0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiPhi1020 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar2010Plus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar2007_0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF2_1270 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK2Star1430Plus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK2Star1430_0 , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiP , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiXiMinus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Minus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Plus , 5.0/_weightedTotalNumPiPlus); 
scale(_histMeanMultiSigma1385PlusMinus, 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiOmegaMinus , 5.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda_c_Plus , 5.0/_weightedTotalNumPiPlus); } if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) { scale(_histMeanMultiPi0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEta , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiEtaPrime , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiD0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDPlus_s , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiBPlus_B0_d , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiBPlus_u , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiB0_s , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF0_980 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiA0_980Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiRho770_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiRho770Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiOmega782 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiKStar892_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiPhi1020 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar2010Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDStar_s2112Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiBStar , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiJPsi1S , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiPsi2S , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiUpsilon1S , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF1_1285 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF1_1420 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiChi_c1_3510 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiF2_1270 , 1.0/_weightedTotalNumPiPlus); 
scale(_histMeanMultiF2Prime1525 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK2Star1430_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiBStarStar , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDs1Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDs2Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiP , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigmaMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigmaPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigmaPlusMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiXiMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiDelta1232PlusPlus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Minus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiXi1530_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiOmegaMinus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda_c_Plus , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda_b_0 , 1.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda1520 , 1.0/_weightedTotalNumPiPlus); } if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) { scale(_histMeanMultiKPlus , 70.0/_weightedTotalNumPiPlus); scale(_histMeanMultiK0 , 70.0/_weightedTotalNumPiPlus); scale(_histMeanMultiP , 70.0/_weightedTotalNumPiPlus); scale(_histMeanMultiLambda , 70.0/_weightedTotalNumPiPlus); } } //@} private: double _weightedTotalNumPiPlus; Histo1DPtr _histMeanMultiPi0; Histo1DPtr _histMeanMultiKPlus; Histo1DPtr _histMeanMultiK0; Histo1DPtr _histMeanMultiEta; Histo1DPtr _histMeanMultiEtaPrime; Histo1DPtr _histMeanMultiDPlus; Histo1DPtr _histMeanMultiD0; Histo1DPtr _histMeanMultiDPlus_s; Histo1DPtr _histMeanMultiBPlus_B0_d; Histo1DPtr _histMeanMultiBPlus_u; Histo1DPtr 
_histMeanMultiB0_s; Histo1DPtr _histMeanMultiF0_980; Histo1DPtr _histMeanMultiA0_980Plus; Histo1DPtr _histMeanMultiRho770_0; Histo1DPtr _histMeanMultiRho770Plus; Histo1DPtr _histMeanMultiOmega782; Histo1DPtr _histMeanMultiKStar892Plus; Histo1DPtr _histMeanMultiKStar892_0; Histo1DPtr _histMeanMultiPhi1020; Histo1DPtr _histMeanMultiDStar2010Plus; Histo1DPtr _histMeanMultiDStar2007_0; Histo1DPtr _histMeanMultiDStar_s2112Plus; Histo1DPtr _histMeanMultiBStar; Histo1DPtr _histMeanMultiJPsi1S; Histo1DPtr _histMeanMultiPsi2S; Histo1DPtr _histMeanMultiUpsilon1S; Histo1DPtr _histMeanMultiF1_1285; Histo1DPtr _histMeanMultiF1_1420; Histo1DPtr _histMeanMultiChi_c1_3510; Histo1DPtr _histMeanMultiF2_1270; Histo1DPtr _histMeanMultiF2Prime1525; Histo1DPtr _histMeanMultiK2Star1430Plus; Histo1DPtr _histMeanMultiK2Star1430_0; Histo1DPtr _histMeanMultiBStarStar; Histo1DPtr _histMeanMultiDs1Plus; Histo1DPtr _histMeanMultiDs2Plus; Histo1DPtr _histMeanMultiP; Histo1DPtr _histMeanMultiLambda; Histo1DPtr _histMeanMultiSigma0; Histo1DPtr _histMeanMultiSigmaMinus; Histo1DPtr _histMeanMultiSigmaPlus; Histo1DPtr _histMeanMultiSigmaPlusMinus; Histo1DPtr _histMeanMultiXiMinus; Histo1DPtr _histMeanMultiDelta1232PlusPlus; Histo1DPtr _histMeanMultiSigma1385Minus; Histo1DPtr _histMeanMultiSigma1385Plus; Histo1DPtr _histMeanMultiSigma1385PlusMinus; Histo1DPtr _histMeanMultiXi1530_0; Histo1DPtr _histMeanMultiOmegaMinus; Histo1DPtr _histMeanMultiLambda_c_Plus; Histo1DPtr _histMeanMultiLambda_b_0; Histo1DPtr _histMeanMultiSigma_c_PlusPlus_0; Histo1DPtr _histMeanMultiLambda1520; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(PDG_HADRON_MULTIPLICITIES_RATIOS); } diff --git a/analyses/pluginRHIC/BRAHMS_2004_I647076.cc b/analyses/pluginRHIC/BRAHMS_2004_I647076.cc --- a/analyses/pluginRHIC/BRAHMS_2004_I647076.cc +++ b/analyses/pluginRHIC/BRAHMS_2004_I647076.cc @@ -1,217 +1,217 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include 
"Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// @brief BRAHMS Centrality projection. class BRAHMSCentrality : public SingleValueProjection { public: // Constructor BRAHMSCentrality() : SingleValueProjection() { // Using here the BRAHMS reaction centrality from eg. 1602.01183, which // might not be correct. declare(ChargedFinalState(Cuts::pT > 0.1*GeV && Cuts::abseta < 2.2), "ChargedFinalState"); } // Destructor virtual ~BRAHMSCentrality() {} // Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(BRAHMSCentrality); protected: // Do the projection. Count the number of charged particles in // the specified range. virtual void project(const Event& e) { clear(); set(apply (e, "ChargedFinalState").particles().size()); } // Compare to another projection. virtual int compare(const Projection& p) const { // This projection is only used for the analysis below. return UNDEFINED; } }; /// @brief Brahms centrality calibration analysis based on the // BrahmsCentrality projection. No data is given for this // analysis, so one MUST do a calibration run. class BRAHMS_2004_CENTRALITY : public Analysis { public: // Constructor BRAHMS_2004_CENTRALITY() : Analysis("BRAHMS_2004_CENTRALITY") {} // Initialize the analysis void init() { declare(BRAHMSCentrality(),"Centrality"); declare(ImpactParameterProjection(), "IMP"); // The central multiplicity. mult = bookHisto1D("mult",450,0,4500); // Safeguard against filling preloaded histograms. done = (mult->numEntries() > 0); // The impact parameter. imp = bookHisto1D("mult_IMP",100,0,20); } // Analyse a single event void analyze(const Event& event) { if (done) return; // Fill impact parameter. imp->fill(apply(event,"IMP")(), event.weight()); // Fill multiplicity. 
mult->fill(apply(event,"Centrality")(), event.weight()); } // Finalize the analysis void finalize() { // Normalize the distributions, safeguarding against // yoda normalization error. if(mult->numEntries() > 0) mult->normalize(); if(imp->numEntries() > 0) imp->normalize(); } private: // Histograms. Histo1DPtr mult; Histo1DPtr imp; // Flag to test if we have preloaded histograms. bool done; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BRAHMS_2004_CENTRALITY); /// @brief Brahms pT spectra for id particles (pi+, pi-, K+, K-) // in small bins of rapidity, 5% central collisions. // System: AuAu @ 200GeV/nn. class BRAHMS_2004_I647076 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BRAHMS_2004_I647076); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // Centrality Projection. declareCentrality(BRAHMSCentrality(), "BRAHMS_2004_CENTRALITY","mult","BCEN"); // TODO: Feed down correction is unclear. declare(FinalState(Cuts::rap < 4 && Cuts::rap > -0.1 && Cuts::pT > 100*MeV), "FS"); // The measured rapidity intervals for pions. rapIntervalsPi = {{-0.1,0.},{0.,0.1},{0.4,0.6},{0.6,0.8},{0.8,1.0}, {1.0,1.2},{1.2,1.4},{2.1,2.3},{2.4,2.6},{3.0,3.1},{3.1,3.2},{3.2,3.3}, {3.3,3.4},{3.4,3.66}}; // The measured rapidity intervals for kaons. rapIntervalsK = {{-0.1,0.},{0.,0.1},{0.4,0.6},{0.6,0.8},{0.8,1.0}, {1.0,1.2},{2.0,2.2},{2.3,2.5},{2.9,3.0},{3.0,3.1},{3.1,3.2},{3.2,3.4}}; // Book histograms for (int i = 1, N = rapIntervalsPi.size(); i <= N; ++i) { piPlus.push_back(bookHisto1D(1, 1, i)); piMinus.push_back(bookHisto1D(1, 1, 14 + i)); } for (int i = 1, N = rapIntervalsK.size(); i <= N; ++i) { kPlus.push_back(bookHisto1D(2, 1, i)); kMinus.push_back(bookHisto1D(2, 1, 12 + i)); } // Counter for accepted sum of weights (centrality cut). 
centSow = bookCounter("centSow"); } /// Perform the per-event analysis void analyze(const Event& event) { const double w = event.weight(); // Reject all non-central events. The paper does not speak of // any other event trigger, which in any case should matter // little for central events. if(apply(event,"BCEN")() > 5.0) return; // Keep track of sum of weights. centSow->fill(w); const FinalState& fs = apply(event,"FS"); // Loop over particles. for (const auto& p : fs.particles()) { const double y = p.rapidity(); const double pT = p.pT(); const int id = p.pid(); // First pions. if (abs(id) == 211) { // Protect against decaying K0S and Lambda if (p.hasAncestor(310) || p.hasAncestor(-310) || p.hasAncestor(3122) || p.hasAncestor(3122)) continue; for (int i = 0, N = rapIntervalsPi.size(); i < N; ++i) { if (y > rapIntervalsPi[i].first && y <= rapIntervalsPi[i].second) { const double dy = rapIntervalsPi[i].second - rapIntervalsPi[i].first; const double nWeight = w / ( 2.*M_PI*pT*dy); if (id == 211) piPlus[i]->fill(pT, nWeight); else piMinus[i]->fill(pT, nWeight); break; } } } // Then kaons. else if (abs(id) == 321) { for (int i = 0, N = rapIntervalsK.size(); i < N; ++i) { if (y > rapIntervalsK[i].first && y <= rapIntervalsK[i].second) { const double dy = rapIntervalsK[i].second - rapIntervalsK[i].first; const double nWeight = w / ( 2.*M_PI*pT*dy); if (id == 321) kPlus[i]->fill(pT, nWeight); else kMinus[i]->fill(pT, nWeight); break; } } } } } /// Normalise histograms etc., after the run void finalize() { // Normalize all histograms to per-event yields. for (int i = 0, N = rapIntervalsPi.size(); i < N; ++i) { piPlus[i]->scaleW(1./centSow->sumW()); piMinus[i]->scaleW(1./centSow->sumW()); } for (int i = 0, N = rapIntervalsK.size(); i < N; ++i) { kPlus[i]->scaleW(1./centSow->sumW()); kMinus[i]->scaleW(1./centSow->sumW()); } } //@} // The rapidity intervals. 
vector > rapIntervalsPi; vector > rapIntervalsK; /// @name Histograms //@{ vector piPlus; vector piMinus; vector kPlus; vector kMinus; CounterPtr centSow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BRAHMS_2004_I647076); } diff --git a/analyses/pluginRHIC/STAR_2006_S6860818.cc b/analyses/pluginRHIC/STAR_2006_S6860818.cc --- a/analyses/pluginRHIC/STAR_2006_S6860818.cc +++ b/analyses/pluginRHIC/STAR_2006_S6860818.cc @@ -1,193 +1,193 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief STAR strange particle spectra in pp at 200 GeV class STAR_2006_S6860818 : public Analysis { public: /// Constructor STAR_2006_S6860818() : Analysis("STAR_2006_S6860818"), _sumWeightSelected(0.0) { for (size_t i = 0; i < 4; i++) { _nBaryon[i] = 0; _nAntiBaryon[i] = 0; _nWeightedBaryon[i] = 0.; _nWeightedAntiBaryon[i] = 0.; } } /// Book projections and histograms void init() { ChargedFinalState bbc1(Cuts::etaIn(-5.0, -3.5)); // beam-beam-counter trigger ChargedFinalState bbc2(Cuts::etaIn( 3.5, 5.0)); // beam-beam-counter trigger declare(bbc1, "BBC1"); declare(bbc2, "BBC2"); - UnstableFinalState ufs(Cuts::abseta < 2.5); + UnstableParticles ufs(Cuts::abseta < 2.5); declare(ufs, "UFS"); _h_pT_k0s = bookHisto1D(1, 1, 1); _h_pT_kminus = bookHisto1D(1, 2, 1); _h_pT_kplus = bookHisto1D(1, 3, 1); _h_pT_lambda = bookHisto1D(1, 4, 1); _h_pT_lambdabar = bookHisto1D(1, 5, 1); _h_pT_ximinus = bookHisto1D(1, 6, 1); _h_pT_xiplus = bookHisto1D(1, 7, 1); //_h_pT_omega = bookHisto1D(1, 8, 1); _h_antibaryon_baryon_ratio = bookScatter2D(2, 1, 1); _h_lambar_lam = bookScatter2D(2, 2, 1); _h_xiplus_ximinus = bookScatter2D(2, 3, 1); _h_pT_vs_mass = bookProfile1D(3, 1, 1); } /// Do the analysis void analyze(const Event& event) { const ChargedFinalState& bbc1 = 
apply(event, "BBC1"); const ChargedFinalState& bbc2 = apply(event, "BBC2"); if (bbc1.size()<1 || bbc2.size()<1) { MSG_DEBUG("Failed beam-beam-counter trigger"); vetoEvent; } const double weight = event.weight(); - const UnstableFinalState& ufs = apply(event, "UFS"); + const UnstableParticles& ufs = apply(event, "UFS"); foreach (const Particle& p, ufs.particles()) { if (p.absrap() < 0.5) { const PdgId pid = p.pid(); const double pT = p.pT() / GeV; switch (abs(pid)) { case PID::PIPLUS: if (pid < 0) _h_pT_vs_mass->fill(0.1396, pT, weight); break; case PID::PROTON: if (pid < 0) _h_pT_vs_mass->fill(0.9383, pT, weight); if (pT > 0.4) { pid > 0 ? _nBaryon[0]++ : _nAntiBaryon[0]++; pid > 0 ? _nWeightedBaryon[0]+=weight : _nWeightedAntiBaryon[0]+=weight; } break; case PID::K0S: if (pT > 0.2) { _h_pT_k0s->fill(pT, weight/pT); } _h_pT_vs_mass->fill(0.5056, pT, weight); break; case PID::K0L: _h_pT_vs_mass->fill(0.5056, pT, weight); break; case 113: // rho0(770) _h_pT_vs_mass->fill(0.7755, pT, weight); break; case 313: // K0*(892) _h_pT_vs_mass->fill(0.8960, pT, weight); break; case 333: // phi(1020) _h_pT_vs_mass->fill(1.0190, pT, weight); break; case 3214: // Sigma(1385) _h_pT_vs_mass->fill(1.3840, pT, weight); break; case 3124: // Lambda(1520) _h_pT_vs_mass->fill(1.5200, pT, weight); break; case PID::KPLUS: if (pid < 0) _h_pT_vs_mass->fill(0.4856, pT, weight); if (pT > 0.2) { pid > 0 ? _h_pT_kplus->fill(pT, weight/pT) : _h_pT_kminus->fill(pT, weight/pT); } break; case PID::LAMBDA: pid > 0 ? _h_pT_vs_mass->fill(1.1050, pT, weight) : _h_pT_vs_mass->fill(1.1250, pT, weight); if (pT > 0.3) { pid > 0 ? _h_pT_lambda->fill(pT, weight/pT) : _h_pT_lambdabar->fill(pT, weight/pT); pid > 0 ? _nBaryon[1]++ : _nAntiBaryon[1]++; pid > 0 ? _nWeightedBaryon[1]+=weight : _nWeightedAntiBaryon[1]+=weight; } break; case PID::XIMINUS: pid > 0 ? _h_pT_vs_mass->fill(1.3120, pT, weight) : _h_pT_vs_mass->fill(1.3320, pT, weight); if (pT > 0.5) { pid > 0 ? 
_h_pT_ximinus->fill(pT, weight/pT) : _h_pT_xiplus->fill(pT, weight/pT); pid > 0 ? _nBaryon[2]++ : _nAntiBaryon[2]++; pid > 0 ? _nWeightedBaryon[2]+=weight : _nWeightedAntiBaryon[2]+=weight; } break; case PID::OMEGAMINUS: _h_pT_vs_mass->fill(1.6720, pT, weight); if (pT > 0.5) { //_h_pT_omega->fill(pT, weight/pT); pid > 0 ? _nBaryon[3]++ : _nAntiBaryon[3]++; pid > 0 ? _nWeightedBaryon[3]+=weight : _nWeightedAntiBaryon[3]+=weight; } break; } } } _sumWeightSelected += event.weight(); } /// Finalize void finalize() { std::vector points; for (size_t i=0 ; i<4 ; i++) { if (_nWeightedBaryon[i]==0 || _nWeightedAntiBaryon[i]==0) { points.push_back(Point2D(i,0,0.5,0)); } else { double y = _nWeightedAntiBaryon[i]/_nWeightedBaryon[i]; double dy = sqrt( 1./_nAntiBaryon[i] + 1./_nBaryon[i] ); points.push_back(Point2D(i,y,0.5,y*dy)); } } _h_antibaryon_baryon_ratio->addPoints( points ); divide(_h_pT_lambdabar,_h_pT_lambda, _h_lambar_lam); divide(_h_pT_xiplus,_h_pT_ximinus, _h_xiplus_ximinus); scale(_h_pT_k0s, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_kminus, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_kplus, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_lambda, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_lambdabar, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_ximinus, 1./(2*M_PI*_sumWeightSelected)); scale(_h_pT_xiplus, 1./(2*M_PI*_sumWeightSelected)); //scale(_h_pT_omega, 1./(2*M_PI*_sumWeightSelected)); MSG_DEBUG("sumOfWeights() = " << sumOfWeights()); MSG_DEBUG("_sumWeightSelected = " << _sumWeightSelected); } private: double _sumWeightSelected; int _nBaryon[4]; int _nAntiBaryon[4]; double _nWeightedBaryon[4]; double _nWeightedAntiBaryon[4]; Histo1DPtr _h_pT_k0s, _h_pT_kminus, _h_pT_kplus, _h_pT_lambda, _h_pT_lambdabar, _h_pT_ximinus, _h_pT_xiplus; //Histo1DPtr _h_pT_omega; Scatter2DPtr _h_antibaryon_baryon_ratio; Profile1DPtr _h_pT_vs_mass; Scatter2DPtr _h_lambar_lam; Scatter2DPtr _h_xiplus_ximinus; }; // The hook for the plugin system 
DECLARE_RIVET_PLUGIN(STAR_2006_S6860818); }