diff --git a/analyses/pluginATLAS/ATLAS_2011_I944826.cc b/analyses/pluginATLAS/ATLAS_2011_I944826.cc --- a/analyses/pluginATLAS/ATLAS_2011_I944826.cc +++ b/analyses/pluginATLAS/ATLAS_2011_I944826.cc @@ -1,258 +1,258 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" namespace Rivet { class ATLAS_2011_I944826 : public Analysis { public: /// Constructor ATLAS_2011_I944826() : Analysis("ATLAS_2011_I944826") {} /// Book histograms and initialise projections before the run void init() { book(_sum_w_ks , "ks"); book(_sum_w_lambda, "lambda"); book(_sum_w_passed, "passed"); UnstableFinalState ufs(Cuts::pT > 100*MeV); declare(ufs, "UFS"); ChargedFinalState mbts(Cuts::absetaIn(2.09, 3.84)); declare(mbts, "MBTS"); IdentifiedFinalState nstable(Cuts::abseta < 2.5 && Cuts::pT >= 100*MeV); nstable.acceptIdPair(PID::ELECTRON) .acceptIdPair(PID::MUON) .acceptIdPair(PID::PIPLUS) .acceptIdPair(PID::KPLUS) .acceptIdPair(PID::PROTON); declare(nstable, "nstable"); if (fuzzyEquals(sqrtS()/GeV, 7000, 1e-3)) { book(_hist_Ks_pT ,1, 1, 1); book(_hist_Ks_y ,2, 1, 1); book(_hist_Ks_mult ,3, 1, 1); book(_hist_L_pT ,7, 1, 1); book(_hist_L_y ,8, 1, 1); book(_hist_L_mult ,9, 1, 1); book(_hist_Ratio_v_y ,13, 1, 1); book(_hist_Ratio_v_pT,14, 1, 1); // book(_temp_lambda_v_y, "TMP/lambda_v_y", 10, 0.0, 2.5); book(_temp_lambdabar_v_y, "TMP/lambdabar_v_y", 10, 0.0, 2.5); book(_temp_lambda_v_pT, "TMP/lambda_v_pT", 18, 0.5, 4.1); book(_temp_lambdabar_v_pT, "TMP/lambdabar_v_pT", 18, 0.5, 4.1); } else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) { book(_hist_Ks_pT ,4, 1, 1); book(_hist_Ks_y ,5, 1, 1); book(_hist_Ks_mult ,6, 1, 1); book(_hist_L_pT ,10, 1, 1); book(_hist_L_y ,11, 1, 1); book(_hist_L_mult ,12, 1, 1); book(_hist_Ratio_v_y ,15, 1, 1); book(_hist_Ratio_v_pT,16, 1, 1); // book(_temp_lambda_v_y, "TMP/lambda_v_y", 5, 0.0, 2.5); book(_temp_lambdabar_v_y, "TMP/lambdabar_v_y", 5, 0.0, 2.5); book(_temp_lambda_v_pT, "TMP/lambda_v_pT", 8, 0.5, 3.7); book(_temp_lambdabar_v_pT, "TMP/lambdabar_v_pT", 8, 0.5, 3.7); } } // This function is required to impose the flight time cuts on Kaons and Lambdas double getPerpFlightDistance(const Rivet::Particle& p) { const HepMC::GenParticle* genp = p.genParticle(); const HepMC::GenVertex* prodV = genp->production_vertex(); const HepMC::GenVertex* decV = genp->end_vertex(); const HepMC::ThreeVector prodPos = prodV->point3d(); if (decV) { const HepMC::ThreeVector decPos = decV->point3d(); double dy = prodPos.y() - decPos.y(); double dx = prodPos.x() - decPos.x(); return add_quad(dx, dy); } return numeric_limits::max(); } bool daughtersSurviveCuts(const Rivet::Particle& p) { // We require the Kshort or Lambda to decay into two charged // particles with at least pT = 100 MeV inside acceptance region const HepMC::GenParticle* genp = p.genParticle(); const HepMC::GenVertex* decV = genp->end_vertex(); bool decision = true; if (!decV) return false; if (decV->particles_out_size() == 2) { std::vector pTs; std::vector charges; std::vector etas; for (const HepMC::GenParticle* gp : particles(decV, HepMC::children)) { pTs.push_back(gp->momentum().perp()); etas.push_back(fabs(gp->momentum().eta())); - charges.push_back( Rivet::PID::threeCharge(gp->pdg_id()) ); + charges.push_back( Rivet::PID::charge3(gp->pdg_id()) ); // gp->print(); } if ( (pTs[0]/Rivet::GeV < 0.1) || (pTs[1]/Rivet::GeV < 0.1) ) { decision = false; MSG_DEBUG("Failed pT cut: " << pTs[0]/Rivet::GeV << 
" " << pTs[1]/Rivet::GeV); } if ( etas[0] > 2.5 || etas[1] > 2.5 ) { decision = false; MSG_DEBUG("Failed eta cut: " << etas[0] << " " << etas[1]); } if ( charges[0] * charges[1] >= 0 ) { decision = false; MSG_DEBUG("Failed opposite charge cut: " << charges[0] << " " << charges[1]); } } else { decision = false; MSG_DEBUG("Failed nDaughters cut: " << decV->particles_out_size()); } return decision; } /// Perform the per-event analysis void analyze(const Event& event) { // ATLAS MBTS trigger requirement of at least one hit in either hemisphere if (apply(event, "MBTS").size() < 1) { MSG_DEBUG("Failed trigger cut"); vetoEvent; } // Veto event also when we find less than 2 particles in the acceptance region of type 211,2212,11,13,321 if (apply(event, "nstable").size() < 2) { MSG_DEBUG("Failed stable particle cut"); vetoEvent; } _sum_w_passed->fill(); // This ufs holds all the Kaons and Lambdas const UnstableFinalState& ufs = apply(event, "UFS"); // Some conters int n_KS0 = 0; int n_LAMBDA = 0; // Particle loop for (const Particle& p : ufs.particles()) { // General particle quantities const double pT = p.pT(); const double y = p.rapidity(); const PdgId apid = p.abspid(); double flightd = 0.0; // Look for Kaons, Lambdas switch (apid) { case PID::K0S: flightd = getPerpFlightDistance(p); if (!inRange(flightd/mm, 4., 450.) ) { MSG_DEBUG("Kaon failed flight distance cut:" << flightd); break; } if (daughtersSurviveCuts(p) ) { _hist_Ks_y ->fill(y); _hist_Ks_pT->fill(pT/GeV); _sum_w_ks->fill(); n_KS0++; } break; case PID::LAMBDA: if (pT < 0.5*GeV) { // Lambdas have an additional pT cut of 500 MeV MSG_DEBUG("Lambda failed pT cut:" << pT/GeV << " GeV"); break; } flightd = getPerpFlightDistance(p); if (!inRange(flightd/mm, 17., 450.)) { MSG_DEBUG("Lambda failed flight distance cut:" << flightd/mm << " mm"); break; } if ( daughtersSurviveCuts(p) ) { if (p.pid() == PID::LAMBDA) { _temp_lambda_v_y->fill(fabs(y)); _temp_lambda_v_pT->fill(pT/GeV); _hist_L_y->fill(y); _hist_L_pT->fill(pT/GeV); _sum_w_lambda->fill(); n_LAMBDA++; } else if (p.pid() == -PID::LAMBDA) { _temp_lambdabar_v_y->fill(fabs(y)); _temp_lambdabar_v_pT->fill(pT/GeV); } } break; } } // Fill multiplicity histos _hist_Ks_mult->fill(n_KS0); _hist_L_mult->fill(n_LAMBDA); } /// Normalise histograms etc., after the run void finalize() { MSG_DEBUG("# Events that pass the trigger: " << dbl(*_sum_w_passed)); MSG_DEBUG("# Kshort events: " << dbl(*_sum_w_ks)); MSG_DEBUG("# Lambda events: " << dbl(*_sum_w_lambda)); /// @todo Replace with normalize()? scale(_hist_Ks_pT, 1.0 / *_sum_w_ks); scale(_hist_Ks_y, 1.0 / *_sum_w_ks); scale(_hist_Ks_mult, 1.0 / *_sum_w_passed); /// @todo Replace with normalize()? 
scale(_hist_L_pT, 1.0 / *_sum_w_lambda); scale(_hist_L_y, 1.0 / *_sum_w_lambda); scale(_hist_L_mult, 1.0 / *_sum_w_passed); // Division of histograms to obtain lambda_bar/lambda ratios divide(_temp_lambdabar_v_y, _temp_lambda_v_y, _hist_Ratio_v_y); divide(_temp_lambdabar_v_pT, _temp_lambda_v_pT, _hist_Ratio_v_pT); } private: /// Counters CounterPtr _sum_w_ks, _sum_w_lambda, _sum_w_passed; /// @name Persistent histograms //@{ Histo1DPtr _hist_Ks_pT, _hist_Ks_y, _hist_Ks_mult; Histo1DPtr _hist_L_pT, _hist_L_y, _hist_L_mult; Scatter2DPtr _hist_Ratio_v_pT, _hist_Ratio_v_y; //@} /// @name Temporary histograms //@{ Histo1DPtr _temp_lambda_v_y, _temp_lambdabar_v_y; Histo1DPtr _temp_lambda_v_pT, _temp_lambdabar_v_pT; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2011_I944826); } diff --git a/analyses/pluginATLAS/ATLAS_2012_CONF_2012_109.cc b/analyses/pluginATLAS/ATLAS_2012_CONF_2012_109.cc --- a/analyses/pluginATLAS/ATLAS_2012_CONF_2012_109.cc +++ b/analyses/pluginATLAS/ATLAS_2012_CONF_2012_109.cc @@ -1,362 +1,362 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// @author Peter Richardson class ATLAS_2012_CONF_2012_109 : public Analysis { public: /// Constructor ATLAS_2012_CONF_2012_109() : Analysis("ATLAS_2012_CONF_2012_109") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Projection to find the electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 20*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Projection to find the muons IdentifiedFinalState muons(Cuts::abseta < 2.4 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Jet finder VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // All tracks (to do deltaR with leptons) declare(ChargedFinalState(Cuts::abseta < 3.0), "cfs"); // Used for pTmiss (N.B. 
// the real 'vfs' extends beyond 4.5 to |eta| = 4.9)
      declare(VisibleFinalState(Cuts::abseta < 4.5), "vfs");

      // Book histograms
      book(_count_A_tight   ,"count_A_tight"   , 1, 0., 1.);
      book(_count_A_medium  ,"count_A_medium"  , 1, 0., 1.);
      book(_count_A_loose   ,"count_A_loose"   , 1, 0., 1.);
      book(_count_B_tight   ,"count_B_tight"   , 1, 0., 1.);
      book(_count_B_medium  ,"count_B_medium"  , 1, 0., 1.);
      book(_count_C_tight   ,"count_C_tight"   , 1, 0., 1.);
      book(_count_C_medium  ,"count_C_medium"  , 1, 0., 1.);
      book(_count_C_loose   ,"count_C_loose"   , 1, 0., 1.);
      book(_count_D_tight   ,"count_D_tight"   , 1, 0., 1.);
      book(_count_E_tight   ,"count_E_tight"   , 1, 0., 1.);
      book(_count_E_medium  ,"count_E_medium"  , 1, 0., 1.);
      book(_count_E_loose   ,"count_E_loose"   , 1, 0., 1.);

      book(_hist_meff_A_medium ,"meff_A_medium" , 40, 0., 4000.);
      book(_hist_meff_A_tight  ,"meff_A_tight"  , 40, 0., 4000.);
      book(_hist_meff_B_medium ,"meff_B_medium" , 40, 0., 4000.);
      book(_hist_meff_B_tight  ,"meff_B_tight"  , 40, 0., 4000.);
      book(_hist_meff_C_medium ,"meff_C_medium" , 40, 0., 4000.);
      book(_hist_meff_C_tight  ,"meff_C_tight"  , 40, 0., 4000.);
      book(_hist_meff_D        ,"meff_D"        , 40, 0., 4000.);
      book(_hist_meff_E_loose  ,"meff_E_loose"  , 40, 0., 4000.);
      book(_hist_meff_E_medium ,"meff_E_medium" , 40, 0., 4000.);
      book(_hist_meff_E_tight  ,"meff_E_tight"  , 40, 0., 4000.);
    }


    /// Perform the per-event analysis
    void analyze(const Event& event) {
      const double weight = 1.0;

      Jets cand_jets;
      const Jets jets = apply<FastJets>(event, "AntiKtJets04").jetsByPt(20.0*GeV);
      for (const Jet& jet : jets) {
        if ( fabs( jet.eta() ) < 4.9 ) {
          cand_jets.push_back(jet);
        }
      }

      const Particles cand_e = apply<IdentifiedFinalState>(event, "elecs").particlesByPt();

      // Muon isolation not mentioned in hep-ex 1109.6572 but assumed to still be applicable
      Particles cand_mu;
      const Particles chg_tracks = apply<ChargedFinalState>(event, "cfs").particles();
      const Particles muons = apply<IdentifiedFinalState>(event, "muons").particlesByPt();
      for (const Particle& mu : muons) {
        double pTinCone = -mu.pT();
        for (const Particle& track : chg_tracks) {
          if ( deltaR(mu.momentum(),track.momentum()) <= 0.2 ) {
            pTinCone += track.pT();
          }
        }
        if ( pTinCone < 1.8*GeV ) cand_mu.push_back(mu);
      }

      // Resolve jet-lepton overlap for jets with |eta| < 2.8
      Jets recon_jets;
      for ( const Jet& jet : cand_jets ) {
        if ( fabs( jet.eta() ) >= 2.8 ) continue;
        bool away_from_e = true;
        for ( const Particle & e : cand_e ) {
          if ( deltaR(e.momentum(),jet.momentum()) <= 0.2 ) {
            away_from_e = false;
            break;
          }
        }
        if ( away_from_e ) recon_jets.push_back( jet );
      }

      Particles recon_e, recon_mu;
      for ( const Particle & e : cand_e ) {
        bool away = true;
        for ( const Jet& jet : recon_jets ) {
          if ( deltaR(e.momentum(),jet.momentum()) < 0.4 ) {
            away = false;
            break;
          }
        }
        if ( away ) recon_e.push_back( e );
      }
      for ( const Particle & mu : cand_mu ) {
        bool away = true;
        for ( const Jet& jet : recon_jets ) {
          if ( deltaR(mu.momentum(),jet.momentum()) < 0.4 ) {
            away = false;
            break;
          }
        }
        if ( away ) recon_mu.push_back( mu );
      }

      // pTmiss
      // Based on all candidate electrons, muons and jets, plus everything else with |eta| < 4.5
      // i.e. everything in our projection "vfs" plus the jets with |eta| > 4.5
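      // A short sketch of the construction that follows (numbers purely illustrative):
      // pTmiss is minus the vector sum of the visible momenta, together with the jets
      // that fall outside the |eta| < 4.5 visible-final-state acceptance, and eTmiss is
      // its magnitude in the transverse plane.  Two visible objects of pT = 100 GeV
      // back to back largely cancel (eTmiss ~ 0); replace one of them by an invisible
      // particle and the imbalance shows up as eTmiss ~ 100 GeV.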
      Particles vfs_particles = apply<VisibleFinalState>(event, "vfs").particles();
      FourMomentum pTmiss;
      for ( const Particle & p : vfs_particles ) {
        pTmiss -= p.momentum();
      }
      for ( const Jet& jet : cand_jets ) {
        if ( fabs( jet.eta() ) > 4.5 ) pTmiss -= jet.momentum();
      }
      double eTmiss = pTmiss.pT();

      // No electrons with pT > 20 GeV or muons with pT > 10 GeV may remain after overlap removal
      if ( !recon_mu.empty() || !recon_e.empty() ) {
        MSG_DEBUG("Charged leptons left after selection");
        vetoEvent;
      }

      if ( eTmiss <= 160 * GeV ) {
        MSG_DEBUG("Not enough eTmiss: " << eTmiss << " < 160");
        vetoEvent;
      }

      // check the hardest two jets
      if ( recon_jets.size()<2 ||
           recon_jets[0].pT() <= 130.0 * GeV ||
           recon_jets[1].pT() <= 60.0 * GeV ) {
        MSG_DEBUG("No hard leading jet in " << recon_jets.size() << " jets");
        vetoEvent;
      }

      // check the charged and EM fractions of the hard jets to avoid photons
      for (unsigned int ix = 0; ix < 2; ++ix) {
        // jets over 100 GeV
        if (recon_jets[ix].pT() < 100*GeV ||
            recon_jets[ix].eta() > 2.) continue; ///< @todo Should be |eta|?
        double fch(0.), fem(0.), eTotal(0.);
        for(const Particle & part : recon_jets[ix].particles()) {
          long id = part.abspid();
          eTotal += part.E(); // accumulate the total energy so the fractions below have a non-zero denominator
-          if(PID::threeCharge(id)!=0)
+          if(PID::charge3(id)!=0)
            fch += part.E();
          if (id == PID::PHOTON || id == PID::ELECTRON || id == PID::PI0)
            fem += part.E();
        }
        fch /= eTotal;
        fem /= eTotal;
        // remove events with hard photon
        if (fch < 0.02 || (fch < 0.05 && fem > 0.09)) vetoEvent;
      }

      // ==================== observables ====================

      int Njets = 0;
      double min_dPhi_All = 999.999; ///< @todo Use std::numeric_limits!
      double min_dPhi_2   = 999.999; ///< @todo Use std::numeric_limits!
      double min_dPhi_3   = 999.999; ///< @todo Use std::numeric_limits!
      double pTmiss_phi = pTmiss.phi();
      for ( const Jet& jet : recon_jets ) {
        if ( jet.pT() < 40*GeV ) continue;
        double dPhi = deltaPhi( pTmiss_phi, jet.phi());
        if ( Njets < 2 ) min_dPhi_2 = min( min_dPhi_2, dPhi );
        if ( Njets < 3 ) min_dPhi_3 = min( min_dPhi_3, dPhi );
        min_dPhi_All = min( min_dPhi_All, dPhi );
        ++Njets;
      }

      // inclusive meff
      double m_eff_inc = eTmiss;
      for ( const Jet& jet : recon_jets ) {
        double perp = jet.pT();
        if(perp>40.) m_eff_inc += perp;
      }

      // region A
      double m_eff_Nj = eTmiss + recon_jets[0].pT() + recon_jets[1].pT();
      if( min_dPhi_2 > 0.4 && eTmiss/m_eff_Nj > 0.3 ) {
        _hist_meff_A_tight ->fill(m_eff_inc,weight);
        if(eTmiss/m_eff_Nj > 0.4) _hist_meff_A_medium->fill(m_eff_inc,weight);
        if(m_eff_inc>1900.) _count_A_tight ->fill(0.5,weight);
        if(m_eff_inc>1300. && eTmiss/m_eff_Nj > 0.4) _count_A_medium->fill(0.5,weight);
        if(m_eff_inc>1300. && eTmiss/m_eff_Nj > 0.4) _count_A_loose ->fill(0.5,weight);
      }
      // for rest of regions 3 jets pT> 60 needed
      if(recon_jets.size()<3 || recon_jets[2].perp()<60.) vetoEvent;
      // region B
      m_eff_Nj += recon_jets[2].perp();
      if( min_dPhi_3 > 0.4 && eTmiss/m_eff_Nj > 0.25 ) {
        _hist_meff_B_tight->fill(m_eff_inc,weight);
        if(eTmiss/m_eff_Nj > 0.3) _hist_meff_B_medium->fill(m_eff_inc,weight);
        if(m_eff_inc>1900.) _count_B_tight ->fill(0.5,weight);
        if(m_eff_inc>1300. && eTmiss/m_eff_Nj > 0.3) _count_B_medium->fill(0.5,weight);
      }
      // for rest of regions 4 jets pT> 60 needed
      if(recon_jets.size()<4 || recon_jets[3].perp()<60.) vetoEvent;
      // region C
      m_eff_Nj += recon_jets[3].perp();
      if( min_dPhi_3 > 0.4 && min_dPhi_All > 0.2 && eTmiss/m_eff_Nj > 0.25 ) {
        _hist_meff_C_tight->fill(m_eff_inc,weight);
        if( eTmiss/m_eff_Nj > 0.3 ) _hist_meff_C_medium->fill(m_eff_inc,weight);
        if(m_eff_inc>1900.) _count_C_tight ->fill(0.5,weight);
        if(m_eff_inc>1300.
&& eTmiss/m_eff_Nj > 0.3) _count_C_medium->fill(0.5,weight); if(m_eff_inc>1000. && eTmiss/m_eff_Nj > 0.3) _count_C_loose ->fill(0.5,weight); } // for rest of regions 5 jets pT> 40 needed if(recon_jets.size()<5 || recon_jets[4].perp()<40.) vetoEvent; // region D m_eff_Nj += recon_jets[4].perp(); if( min_dPhi_3 > 0.4 && min_dPhi_All > 0.2 && eTmiss/m_eff_Nj > 0.15 ) { _hist_meff_D->fill(m_eff_inc,weight); if(m_eff_inc>1700.) _count_D_tight ->fill(0.5,weight); } // for rest of regions 6 jets pT> 40 needed if(recon_jets.size()<6 || recon_jets[5].perp()<40.) vetoEvent; // region E m_eff_Nj += recon_jets[5].perp(); if( min_dPhi_3 > 0.4 && min_dPhi_All > 0.2 && eTmiss/m_eff_Nj > 0.15 ) { _hist_meff_E_tight->fill(m_eff_inc,weight); if( eTmiss/m_eff_Nj > 0.25 ) _hist_meff_E_medium->fill(m_eff_inc,weight); if( eTmiss/m_eff_Nj > 0.3 ) _hist_meff_E_loose->fill(m_eff_inc,weight); if(m_eff_inc>1400.) _count_E_tight ->fill(0.5,weight); if(m_eff_inc>1300.&& eTmiss/m_eff_Nj > 0.25 ) _count_E_medium->fill(0.5,weight); if(m_eff_inc>1000.&& eTmiss/m_eff_Nj > 0.3 ) _count_E_loose ->fill(0.5,weight); } } void finalize() { double norm = crossSection()/femtobarn*5.8/sumOfWeights(); // these are number of events at 5.8fb^-1 per 100 GeV scale( _hist_meff_A_medium , 100. * norm ); scale( _hist_meff_A_tight , 100. * norm ); scale( _hist_meff_B_medium , 100. * norm ); scale( _hist_meff_B_tight , 100. * norm ); scale( _hist_meff_C_medium , 100. * norm ); scale( _hist_meff_C_tight , 100. * norm ); scale( _hist_meff_D , 100. * norm ); scale( _hist_meff_E_loose , 100. * norm ); scale( _hist_meff_E_medium , 100. * norm ); scale( _hist_meff_E_tight , 100. * norm ); // these are number of events at 5.8fb^-1 scale(_count_A_tight ,norm); scale(_count_A_medium ,norm); scale(_count_A_loose ,norm); scale(_count_B_tight ,norm); scale(_count_B_medium ,norm); scale(_count_C_tight ,norm); scale(_count_C_medium ,norm); scale(_count_C_loose ,norm); scale(_count_D_tight ,norm); scale(_count_E_tight ,norm); scale(_count_E_medium ,norm); scale(_count_E_loose ,norm); } //@} private: Histo1DPtr _count_A_tight; Histo1DPtr _count_A_medium; Histo1DPtr _count_A_loose; Histo1DPtr _count_B_tight; Histo1DPtr _count_B_medium; Histo1DPtr _count_C_tight; Histo1DPtr _count_C_medium; Histo1DPtr _count_C_loose; Histo1DPtr _count_D_tight; Histo1DPtr _count_E_tight; Histo1DPtr _count_E_medium; Histo1DPtr _count_E_loose; Histo1DPtr _hist_meff_A_medium; Histo1DPtr _hist_meff_A_tight; Histo1DPtr _hist_meff_B_medium; Histo1DPtr _hist_meff_B_tight; Histo1DPtr _hist_meff_C_medium; Histo1DPtr _hist_meff_C_tight; Histo1DPtr _hist_meff_D; Histo1DPtr _hist_meff_E_loose; Histo1DPtr _hist_meff_E_medium; Histo1DPtr _hist_meff_E_tight; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_CONF_2012_109); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1095236.cc b/analyses/pluginATLAS/ATLAS_2012_I1095236.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1095236.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1095236.cc @@ -1,326 +1,326 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @author Peter Richardson class ATLAS_2012_I1095236 : public Analysis { public: /// Constructor ATLAS_2012_I1095236() : Analysis("ATLAS_2012_I1095236") { } /// 
@name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Projection to find the electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 20*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Projection to find the muons IdentifiedFinalState muons(Cuts::abseta < 2.4 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Jet finder VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // All tracks (to do deltaR with leptons) declare(ChargedFinalState(Cuts::abseta < 3.0),"cfs"); // Used for pTmiss declare(VisibleFinalState(-4.9,4.9),"vfs"); // Book histograms book(_count_SR0_A1 ,"count_SR0_A1", 1, 0., 1.); book(_count_SR0_B1 ,"count_SR0_B1", 1, 0., 1.); book(_count_SR0_C1 ,"count_SR0_C1", 1, 0., 1.); book(_count_SR0_A2 ,"count_SR0_A2", 1, 0., 1.); book(_count_SR0_B2 ,"count_SR0_B2", 1, 0., 1.); book(_count_SR0_C2 ,"count_SR0_C2", 1, 0., 1.); book(_count_SR1_D ,"count_SR1_D" , 1, 0., 1.); book(_count_SR1_E ,"count_SR1_E" , 1, 0., 1.); book(_hist_meff_SR0_A1 ,"hist_m_eff_SR0_A1", 14, 400., 1800.); book(_hist_meff_SR0_A2 ,"hist_m_eff_SR0_A2", 14, 400., 1800.); book(_hist_meff_SR1_D_e ,"hist_meff_SR1_D_e" , 16, 600., 2200.); book(_hist_meff_SR1_D_mu ,"hist_meff_SR1_D_mu", 16, 600., 2200.); book(_hist_met_SR0_A1 ,"hist_met_SR0_A1", 14, 0., 700.); book(_hist_met_SR0_A2 ,"hist_met_SR0_A2", 14, 0., 700.); book(_hist_met_SR0_D_e ,"hist_met_SR1_D_e" , 15, 0., 600.); book(_hist_met_SR0_D_mu ,"hist_met_SR1_D_mu", 15, 0., 600.); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; Jets cand_jets; const Jets jets = apply(event, "AntiKtJets04").jetsByPt(20.0*GeV); for (const Jet& jet : jets) { if ( fabs( jet.eta() ) < 2.8 ) { cand_jets.push_back(jet); } } const Particles cand_e = apply(event, "elecs").particlesByPt(); const Particles cand_mu = apply(event, "muons").particlesByPt(); // Resolve jet-lepton overlap for jets with |eta| < 2.8 Jets recon_jets; for ( const Jet& jet : cand_jets ) { if ( fabs( jet.eta() ) >= 2.8 ) continue; bool away_from_e = true; for ( const Particle & e : cand_e ) { if ( deltaR(e.momentum(),jet.momentum()) <= 0.2 ) { away_from_e = false; break; } } if ( away_from_e ) recon_jets.push_back( jet ); } // get the loose leptons used to define the 0 lepton channel Particles loose_e, loose_mu; for ( const Particle & e : cand_e ) { bool away = true; for ( const Jet& jet : recon_jets ) { if ( deltaR(e.momentum(),jet.momentum()) < 0.4 ) { away = false; break; } } if ( away ) loose_e.push_back( e ); } for ( const Particle & mu : cand_mu ) { bool away = true; for ( const Jet& jet : recon_jets ) { if ( deltaR(mu.momentum(),jet.momentum()) < 0.4 ) { away = false; break; } } if ( away ) loose_mu.push_back( mu ); } // tight leptons for the 1-lepton channel Particles tight_mu; Particles chg_tracks = apply(event, "cfs").particles(); for ( const Particle & mu : loose_mu) { if(mu.perp()<20.) continue; double pTinCone = -mu.pT(); for ( const Particle & track : chg_tracks ) { if ( deltaR(mu.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if ( pTinCone < 1.8*GeV ) tight_mu.push_back(mu); } Particles tight_e; for ( const Particle & e : loose_e ) { if(e.perp()<25.) 
continue; double pTinCone = -e.perp(); for ( const Particle & track : chg_tracks ) { if ( deltaR(e.momentum(),track.momentum()) <= 0.2 ) pTinCone += track.pT(); } if (pTinCone/e.perp()<0.1) { tight_e.push_back(e); } } // pTmiss Particles vfs_particles = apply(event, "vfs").particles(); FourMomentum pTmiss; for ( const Particle & p : vfs_particles ) { pTmiss -= p.momentum(); } double eTmiss = pTmiss.pT(); // get the number of b-tagged jets unsigned int ntagged=0; for (const Jet & jet : recon_jets ) { if(jet.perp()>50. && abs(jet.eta())<2.5 && jet.bTagged() && rand()/static_cast(RAND_MAX)<=0.60) ++ntagged; } // ATLAS calo problem if(rand()/static_cast(RAND_MAX)<=0.42) { for ( const Jet & jet : recon_jets ) { double eta = jet.rapidity(); double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if(jet.perp()>50 && eta>-0.1&&eta<1.5&&phi>-0.9&&phi<-0.5) vetoEvent; } } // at least 1 b tag if(ntagged==0) vetoEvent; // minumum Et miss if(eTmiss<80.) vetoEvent; // at least 3 jets pT > 50 if(recon_jets.size()<3 || recon_jets[2].perp()<50.) vetoEvent; // m_eff double m_eff = eTmiss; for(unsigned int ix=0;ix<3;++ix) m_eff += recon_jets[ix].perp(); // delta Phi double min_dPhi = 999.999; double pTmiss_phi = pTmiss.phi(); for(unsigned int ix=0;ix<3;++ix) { min_dPhi = min( min_dPhi, deltaPhi( pTmiss_phi, recon_jets[ix].phi() ) ); } // 0-lepton channels if(loose_e.empty() && loose_mu.empty() && recon_jets[0].perp()>130. && eTmiss>130. && eTmiss/m_eff>0.25 && min_dPhi>0.4) { // jet charge cut bool jetCharge = true; for(unsigned int ix=0;ix<3;++ix) { if(fabs(recon_jets[ix].eta())>2.) continue; double trackpT=0; for(const Particle & p : recon_jets[ix].particles()) { - if(PID::threeCharge(p.pid())==0) continue; + if(PID::charge3(p.pid())==0) continue; trackpT += p.perp(); } if(trackpT/recon_jets[ix].perp()<0.05) jetCharge = false; } if(jetCharge) { // SR0-A region if(m_eff>500.) { _count_SR0_A1->fill(0.5,weight); _hist_meff_SR0_A1->fill(m_eff,weight); _hist_met_SR0_A1 ->fill(eTmiss,weight); if(ntagged>=2) { _count_SR0_A2->fill(0.5,weight); _hist_meff_SR0_A2->fill(m_eff,weight); _hist_met_SR0_A2 ->fill(eTmiss,weight); } } // SR0-B if(m_eff>700.) { _count_SR0_B1->fill(0.5,weight); if(ntagged>=2) _count_SR0_B2->fill(0.5,weight); } // SR0-C if(m_eff>900.) { _count_SR0_C1->fill(0.5,weight); if(ntagged>=2) _count_SR0_C2->fill(0.5,weight); } } } // 1-lepton channels if(tight_e.size() + tight_mu.size() == 1 && recon_jets.size()>=4 && recon_jets[3].perp()>50.&& recon_jets[0].perp()>60.) { Particle lepton = tight_e.empty() ? tight_mu[0] : tight_e[0]; m_eff += lepton.perp() + recon_jets[3].perp(); // transverse mass cut double mT = 2.*(lepton.perp()*eTmiss- lepton.px()*pTmiss.px()- lepton.py()*pTmiss.py()); mT = sqrt(mT); if(mT>100.&&m_eff>700.) { // D region _count_SR1_D->fill(0.5,weight); if(lepton.abspid()==PID::ELECTRON) { _hist_meff_SR1_D_e->fill(m_eff,weight); _hist_met_SR0_D_e->fill(eTmiss,weight); } else { _hist_meff_SR1_D_mu->fill(m_eff,weight); _hist_met_SR0_D_mu->fill(eTmiss,weight); } // E region if(eTmiss>200.) { _count_SR1_E->fill(0.5,weight); } } } } void finalize() { double norm = crossSection()/femtobarn*2.05/sumOfWeights(); // these are number of events at 2.05fb^-1 per 100 GeV scale( _hist_meff_SR0_A1 , 100. * norm ); scale( _hist_meff_SR0_A2 , 100. * norm ); scale( _hist_meff_SR1_D_e , 100. * norm ); scale( _hist_meff_SR1_D_mu , 100. * norm ); // these are number of events at 2.05fb^-1 per 50 GeV scale( _hist_met_SR0_A1, 50. * norm ); scale( _hist_met_SR0_A2, 40. 
* norm ); // these are number of events at 2.05fb^-1 per 40 GeV scale( _hist_met_SR0_D_e , 40. * norm ); scale( _hist_met_SR0_D_mu, 40. * norm ); // these are number of events at 2.05fb^-1 scale(_count_SR0_A1,norm); scale(_count_SR0_B1,norm); scale(_count_SR0_C1,norm); scale(_count_SR0_A2,norm); scale(_count_SR0_B2,norm); scale(_count_SR0_C2,norm); scale(_count_SR1_D ,norm); scale(_count_SR1_E ,norm); } //@} private: Histo1DPtr _count_SR0_A1; Histo1DPtr _count_SR0_B1; Histo1DPtr _count_SR0_C1; Histo1DPtr _count_SR0_A2; Histo1DPtr _count_SR0_B2; Histo1DPtr _count_SR0_C2; Histo1DPtr _count_SR1_D; Histo1DPtr _count_SR1_E; Histo1DPtr _hist_meff_SR0_A1; Histo1DPtr _hist_meff_SR0_A2; Histo1DPtr _hist_meff_SR1_D_e; Histo1DPtr _hist_meff_SR1_D_mu; Histo1DPtr _hist_met_SR0_A1; Histo1DPtr _hist_met_SR0_A2; Histo1DPtr _hist_met_SR0_D_e; Histo1DPtr _hist_met_SR0_D_mu; }; // This global object acts as a hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1095236); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1126136.cc b/analyses/pluginATLAS/ATLAS_2012_I1126136.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1126136.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1126136.cc @@ -1,297 +1,297 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2012_I1126136 : public Analysis { public: /// Constructor ATLAS_2012_I1126136() : Analysis("ATLAS_2012_I1126136") { } /// @name Analysis methods //@{ /// Book histograms and initialize projections before the run void init() { // projection to find the electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 20*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // projection to find the muons IdentifiedFinalState muons(Cuts::abseta < 2.4 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Jet finder VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // for pTmiss declare(VisibleFinalState(Cuts::abseta < 4.9),"vfs"); // Book histograms book(_count_SR_A ,"count_SR_A" , 1, 0., 1.); book(_count_SR_B ,"count_SR_B" , 1, 0., 1.); book(_hist_mjjj1 ,"hist_mjjj1" , 30 , 0. , 600. ); book(_hist_mjjj2 ,"hist_mjjj2" , 30 , 0. , 600. ); book(_hist_ETmiss ,"hist_ETmiss", 20 , 100. , 600. ); book(_hist_mT2 ,"hist_mT2" , 200, 0. , 1000. 
); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // pTmiss FourMomentum pTmiss; for (const Particle& p : apply(event, "vfs").particles() ) { pTmiss -= p.momentum(); } double ETmiss = pTmiss.pT(); // require eTmiss > 150 if (ETmiss < 150*GeV) vetoEvent; // get the candiate jets Jets cand_jets; for ( const Jet& jet : apply(event, "AntiKtJets04").jetsByPt(20.0*GeV) ) { if (jet.abseta() < 4.5) cand_jets.push_back(jet); } // find the electrons Particles cand_e; for( const Particle& e : apply(event, "elecs").particlesByPt()) { // remove any leptons within 0.4 of any candidate jets bool e_near_jet = false; for ( const Jet& jet : cand_jets ) { double dR = deltaR(e, jet); if (inRange(dR, 0.2, 0.4)) { e_near_jet = true; break; } } if ( e_near_jet ) continue; cand_e.push_back(e); } // find the muons Particles cand_mu; for( const Particle& mu : apply(event, "muons").particlesByPt()) { // remove any leptons within 0.4 of any candidate jets bool mu_near_jet = false; for ( const Jet& jet : cand_jets ) { if ( deltaR(mu, jet) < 0.4 ) { mu_near_jet = true; break; } } if ( mu_near_jet ) continue; cand_mu.push_back(mu); } // veto events with leptons if( ! cand_e.empty() || ! cand_mu.empty() ) vetoEvent; // discard jets that overlap with electrons Jets recon_jets; for ( const Jet& jet : cand_jets ) { if (jet.abseta() > 2.8 || jet.pT() < 30*GeV) continue; bool away_from_e = true; for (const Particle& e : cand_e ) { if ( deltaR(e, jet) < 0.2 ) { away_from_e = false; break; } } if ( away_from_e ) recon_jets.push_back( jet ); } // find b jets Jets tight_bjets,loose_bjets; for(const Jet& jet : recon_jets) { /// @todo Should be abseta? if (!jet.bTagged() && jet.eta()>2.5) continue; double prob = rand()/static_cast(RAND_MAX); if (prob <= 0.60) tight_bjets.push_back(jet); if (prob <= 0.75) loose_bjets.push_back(jet); } // require >=1 tight or >=2 loose b-jets if (! ( !tight_bjets.empty() || loose_bjets.size()>=2) ) vetoEvent; // must be at least 6 jets with pT>30 if (recon_jets.size()<6 ) vetoEvent; // hardest > 130 if (recon_jets[0].perp() < 130. ) vetoEvent; // three hardest jets must be separated from etmiss for (unsigned int ix=0;ix<3;++ix) { if (deltaPhi(recon_jets[ix].momentum(),pTmiss)<0.2*PI) vetoEvent; } // remove events with tau like jets for (unsigned int ix=3;ix=0.2*PI) continue; // check the number of tracks between 1 and 4 unsigned int ncharged=0; for ( const Particle & particle : recon_jets[ix].particles()) { - if (PID::threeCharge(particle.pid())!=0) ++ncharged; + if (PID::charge3(particle.pid())!=0) ++ncharged; } if (ncharged==0 || ncharged>4) continue; // calculate transverse mass and reject if < 100 double mT = 2.*recon_jets[ix].perp()*ETmiss -recon_jets[ix].px()*pTmiss.px() -recon_jets[ix].py()*pTmiss.py(); if (mT<100.) vetoEvent; } // if 2 loose b-jets apply mT cut if (loose_bjets.size()>=2) { // find b-jet closest to eTmiss double minR(1e30); unsigned int ijet(0); for(unsigned int ix=0;ixfill(mjjj1,weight); _hist_mjjj2->fill(mjjj2,weight); // require triplets in 80270.||mjjj2<80.||mjjj2>270.) vetoEvent; // counts in signal regions _count_SR_A->fill(0.5,weight); if(ETmiss>260.) 
_count_SR_B->fill(0.5,weight); _hist_ETmiss->fill(ETmiss,weight); const double m_T2 = mT2(pjjj1, pjjj2, pTmiss, 0.0); // zero mass invisibles _hist_mT2->fill(m_T2,weight); } //@} void finalize() { double norm = 4.7* crossSection()/sumOfWeights()/femtobarn; scale(_count_SR_A , norm ); scale(_count_SR_B , norm ); scale(_hist_mjjj1 , 20.*norm ); scale(_hist_ETmiss, 50.*norm ); scale(_hist_mjjj2 , 20.*norm ); scale(_hist_mT2 , norm ); } private: /// @name Histograms //@{ Histo1DPtr _count_SR_A; Histo1DPtr _count_SR_B; Histo1DPtr _hist_mjjj1; Histo1DPtr _hist_mjjj2; Histo1DPtr _hist_ETmiss; Histo1DPtr _hist_mT2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2012_I1126136); } diff --git a/analyses/pluginATLAS/ATLAS_2012_I1183818.cc b/analyses/pluginATLAS/ATLAS_2012_I1183818.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1183818.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1183818.cc @@ -1,239 +1,239 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" /// @author Peter Wijeratne /// @author Robindra Prabhu namespace Rivet { // A very basic analysis sensitive to ET flow in minbias and dijet events class ATLAS_2012_I1183818 : public Analysis { public: ATLAS_2012_I1183818() : Analysis("ATLAS_2012_I1183818") {} public: void init() { const FinalState cnfs(-4.8, 4.8, 0*MeV); const ChargedFinalState cfs(-2.5, 2.5, 250*MeV); declare(cnfs, "FS"); declare(cfs, "CFS"); const FastJets jetsAntiKt4(cnfs, FastJets::ANTIKT, 0.4); declare(jetsAntiKt4, "AntiKt4Jets"); // ------- MINBIAS HISTOGRAMS -------- // // MB event counter book(m_chargedEvents, "m_chargedEvents"); book(_h_ETflowEta ,1, 1, 1); book(_h_SumETbin1 ,3, 1, 1); book(_h_SumETbin2 ,4, 1, 1); book(_h_SumETbin3 ,5, 1, 1); book(_h_SumETbin4 ,6, 1, 1); book(_h_SumETbin5 ,7, 1, 1); book(_h_SumETbin6 ,8, 1, 1); // ------- DIJET HISTOGRAMS -------- // // Dijet event counter book(m_events_dijets, "m_chargedEvents"); // sumET book(_h_transETflowEta , 2, 1, 1); book(_h_transSumETbin1 , 9, 1, 1); book(_h_transSumETbin2 ,10, 1, 1); book(_h_transSumETbin3 ,11, 1, 1); book(_h_transSumETbin4 ,12, 1, 1); book(_h_transSumETbin5 ,13, 1, 1); book(_h_transSumETbin6 ,14, 1, 1); } void analyze(const Event& event) { const FinalState& cfs = apply(event, "CFS"); bool isCharged = false; if (cfs.size() >= 2) { // event selection: > 2 charged particles with pT > 250.MeV and |eta| < 2.5 isCharged = true; m_chargedEvents->fill(); } const FinalState& cnfs = apply(event, "FS"); Particles particles; for( const Particle& p : cnfs.particles() ) { // enforce truth selection representing detected particle sensitivity double pp = p.p3().mod(); - if (PID::threeCharge(p.pid()) != 0 && pp < 0.5*GeV) continue; - if (PID::threeCharge(p.pid()) == 0 && pp < 0.2*GeV) continue; + if (PID::charge3(p.pid()) != 0 && pp < 0.5*GeV) continue; + if (PID::charge3(p.pid()) == 0 && pp < 0.2*GeV) continue; particles.push_back(p); } // get jets const FastJets& jetsAntiKt4 = apply(event, "AntiKt4Jets"); const Jets& jets = jetsAntiKt4.jetsByPt(20.0*GeV); // initialise sumET variables double sumETbin1 = 0; double sumETbin2 = 0; double sumETbin3 = 0; double sumETbin4 = 0; double sumETbin5 = 0; double sumETbin6 = 0; // if (passes event selection) if (isCharged) { for( const Particle& p : particles ) { ///calculate variables double ET = p.Et()/GeV; double eta = p.abseta(); // fill histograms _h_ETflowEta->fill(eta, ET); if (eta < 0.8) sumETbin1 += ET; else if (eta < 1.6) 
sumETbin2 += ET; else if (eta < 2.4) sumETbin3 += ET; else if (eta < 3.2) sumETbin4 += ET; else if (eta < 4.0) sumETbin5 += ET; else if (eta <= 4.8) sumETbin6 += ET; } // end of for _h_SumETbin1->fill(sumETbin1); _h_SumETbin2->fill(sumETbin2); _h_SumETbin3->fill(sumETbin3); _h_SumETbin4->fill(sumETbin4); _h_SumETbin5->fill(sumETbin5); _h_SumETbin6->fill(sumETbin6); } // --- do dijet analysis --- if ( jets.size() >= 2 && // require at least two jets jets[0].Et() >= 20.*GeV && // require two leading jets to pass ET cuts jets[1].Et() >= 20.*GeV && fabs(jets[0].eta()) < 2.5 && // require leading jets to be central fabs(jets[1].eta()) < 2.5 && deltaPhi(jets[0], jets[1]) > 2.5 && // require back-to-back topology jets[1].Et()/jets[0].Et() >= 0.5) { //require ET-balance // found an event that satisfies dijet selection, now fill histograms... // initialise dijet sumET variables double trans_sumET_bin1 = 0.; double trans_sumET_bin2 = 0.; double trans_sumET_bin3 = 0.; double trans_sumET_bin4 = 0.; double trans_sumET_bin5 = 0.; double trans_sumET_bin6 = 0.; m_events_dijets->fill(); // loop over all particles and check their relation to leading jet for( const Particle& particle : particles ) { // calculate variables double dPhi = deltaPhi( jets[0], particle.momentum() ); double ET = particle.Et()/GeV; double eta = fabs(particle.eta()); // Transverse region if ( dPhi > 1./3.*M_PI && dPhi < 2./3.*M_PI ) { _h_transETflowEta->fill( eta, ET ); if (eta < 0.8) { trans_sumET_bin1 += ET; } else if (eta < 1.6) { trans_sumET_bin2 += ET; } else if (eta < 2.4) { trans_sumET_bin3 += ET; } else if (eta < 3.2) { trans_sumET_bin4 += ET; } else if (eta < 4.0) { trans_sumET_bin5 += ET; } else if (eta <= 4.8) { trans_sumET_bin6 += ET; } } } // end loop over particles _h_transSumETbin1->fill( trans_sumET_bin1); _h_transSumETbin2->fill( trans_sumET_bin2); _h_transSumETbin3->fill( trans_sumET_bin3); _h_transSumETbin4->fill( trans_sumET_bin4); _h_transSumETbin5->fill( trans_sumET_bin5); _h_transSumETbin6->fill( trans_sumET_bin6); } // end of dijet selection cuts } void finalize() { /// several scale factors here: /// 1. nEvents (m_chargedEvents) /// 2. phase-space (2*M_PI) /// 3. double binning due to symmetrisation (2) scale( _h_ETflowEta, 1./m_chargedEvents->val()/(4.*M_PI) ); scale( _h_SumETbin1, 1./m_chargedEvents->val() ); scale( _h_SumETbin2, 1./m_chargedEvents->val() ); scale( _h_SumETbin3, 1./m_chargedEvents->val() ); scale( _h_SumETbin4, 1./m_chargedEvents->val() ); scale( _h_SumETbin5, 1./m_chargedEvents->val() ); scale( _h_SumETbin6, 1./m_chargedEvents->val() ); //Dijet analysis // Dijet scale factors: //1. number of events passing dijet selection //2. phase-space: 1. / 2/3*M_PI //3. 
double binning due to symmetrisation in |eta| plots : 1/2 scale( _h_transETflowEta, 1./m_events_dijets->val() * 1./(4./3.*M_PI) ); scale( _h_transSumETbin1, 1./m_events_dijets->val() ); scale( _h_transSumETbin2, 1./m_events_dijets->val() ); scale( _h_transSumETbin3, 1./m_events_dijets->val() ); scale( _h_transSumETbin4, 1./m_events_dijets->val() ); scale( _h_transSumETbin5, 1./m_events_dijets->val() ); scale( _h_transSumETbin6, 1./m_events_dijets->val() ); } private: // Event counts CounterPtr m_chargedEvents; CounterPtr m_events_dijets; // Minbias-analysis: variable + histograms Histo1DPtr _h_ETflowEta; Histo1DPtr _h_SumETbin1; Histo1DPtr _h_SumETbin2; Histo1DPtr _h_SumETbin3; Histo1DPtr _h_SumETbin4; Histo1DPtr _h_SumETbin5; Histo1DPtr _h_SumETbin6; // Transverse region Histo1DPtr _h_transETflowEta; Histo1DPtr _h_transSumETbin1; Histo1DPtr _h_transSumETbin2; Histo1DPtr _h_transSumETbin3; Histo1DPtr _h_transSumETbin4; Histo1DPtr _h_transSumETbin5; Histo1DPtr _h_transSumETbin6; }; DECLARE_RIVET_PLUGIN(ATLAS_2012_I1183818); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1190187.cc b/analyses/pluginATLAS/ATLAS_2013_I1190187.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1190187.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1190187.cc @@ -1,258 +1,258 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// ATLAS Wee Wemu Wmumu analysis at Z TeV class ATLAS_2013_I1190187 : public Analysis { public: /// Default constructor ATLAS_2013_I1190187() : Analysis("ATLAS_2013_I1190187") { } void init() { FinalState fs; Cut etaRanges_EL = (Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47)) && Cuts::pT > 20*GeV; Cut etaRanges_MU = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; MissingMomentum met(fs); declare(met, "MET"); IdentifiedFinalState Photon(fs); Photon.acceptIdPair(PID::PHOTON); IdentifiedFinalState bare_EL(fs); bare_EL.acceptIdPair(PID::ELECTRON); IdentifiedFinalState bare_MU(fs); bare_MU.acceptIdPair(PID::MUON); IdentifiedFinalState neutrinoFS(fs); neutrinoFS.acceptNeutrinos(); declare(neutrinoFS, "Neutrinos"); //////////////////////////////////////////////////////// // DRESSED LEPTONS // 3.arg: 0.1 = dR(lep,phot) // 4.arg: true = do clustering // 7.arg: false = ignore photons from hadron or tau // ////////////////////////////////////////////////////////// DressedLeptons electronFS(Photon, bare_EL, 0.1, etaRanges_EL); declare(electronFS, "ELECTRON_FS"); DressedLeptons muonFS(Photon, bare_MU, 0.1, etaRanges_MU); declare(muonFS, "MUON_FS"); VetoedFinalState jetinput; jetinput.addVetoOnThisFinalState(bare_MU); jetinput.addVetoOnThisFinalState(neutrinoFS); FastJets jetpro(jetinput, FastJets::ANTIKT, 0.4); declare(jetpro, "jet"); // Book histograms book(_h_Wl1_pT_mumu ,1, 1, 2); book(_h_Wl1_pT_ee ,1, 1, 1); book(_h_Wl1_pT_emu ,1, 1, 3); book(_h_Wl1_pT_inclusive ,4, 1, 1); } /// Do the analysis void analyze(const Event& e) { const vector& muonFS = apply(e, "MUON_FS").dressedLeptons(); const vector& electronFS = apply(e, "ELECTRON_FS").dressedLeptons(); const MissingMomentum& met = apply(e, "MET"); vector dressed_lepton, isolated_lepton, fiducial_lepton; dressed_lepton.insert(dressed_lepton.end(), muonFS.begin(), muonFS.end()); dressed_lepton.insert(dressed_lepton.end(), electronFS.begin(), electronFS.end()); 
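      // Dressing sketch (illustrative only): each DressedLepton above carries the bare
      // lepton four-momentum plus the momenta of the photons clustered within
      // dR(lep, gamma) < 0.1, where dR = sqrt(dEta^2 + dPhi^2).  In the collinear limit
      // the transverse momenta simply add, e.g. a bare muon of pT = 24 GeV with a nearby
      // FSR photon of pT = 2 GeV gives a dressed muon of roughly 26 GeV, and it is this
      // dressed pT that enters the 20 GeV and 25 GeV requirements applied below.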
//////////////////////////////////////////////////////////////////////////// // OVERLAP REMOVAL // -electrons with dR(e,mu)<0.1 are removed // -lower pT electrons with dR(e,e)<0.1 are removed // //////////////////////////////////////////////////////////////////////////// for (DressedLepton& l1 : dressed_lepton) { bool l_isolated = true; for (DressedLepton& l2 : dressed_lepton) { if (l1 != l2 && l2.constituentLepton().abspid() == PID::ELECTRON) { double overlapControl_ll= deltaR(l1.constituentLepton(),l2.constituentLepton()); if (overlapControl_ll < 0.1) { l_isolated = false; // e/e overlap removal if (l1.constituentLepton().abspid() == PID::ELECTRON) { if (l1.constituentLepton().pT()>l2.constituentLepton().pT()) { isolated_lepton.push_back(l1);//keep e with highest pT } else { isolated_lepton.push_back(l2);//keep e with highest pT } } // e/mu overlap removal if (l1.constituentLepton().abspid() == PID::MUON) isolated_lepton.push_back(l1); //keep mu } } } if (l_isolated) isolated_lepton.push_back(l1); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// // PRESELECTION: // "isolated_lepton:" // * electron: pt>20 GeV, |eta|<1.37, 1.52<|eta|<2.47, dR(electron,muon)>0.1 // * muon: pt>20 GeV, |eta|<2.4 // * dR(l,l)>0.1 // // "fiducial_lepton"= isolated_lepton with // * 2 leptons (e or mu) // * leading lepton pt (pT_l1) >25 GeV // * opposite charged leptons // /////////////////////////////////////////////////////////////////////////////////////////////////////////// if (isolated_lepton.size() != 2) vetoEvent; sort(isolated_lepton.begin(), isolated_lepton.end(), cmpMomByPt); - if (isolated_lepton[0].pT() > 25*GeV && threeCharge(isolated_lepton[0]) != threeCharge(isolated_lepton[1])) { + if (isolated_lepton[0].pT() > 25*GeV && charge3(isolated_lepton[0]) != charge3(isolated_lepton[1])) { fiducial_lepton.insert(fiducial_lepton.end(), isolated_lepton.begin(), isolated_lepton.end()); } if (fiducial_lepton.size() == 0) vetoEvent; double pT_l1 = fiducial_lepton[0].pT(); double M_l1l2 = (fiducial_lepton[0].momentum() + fiducial_lepton[1].momentum()).mass(); double pT_l1l2 = (fiducial_lepton[0].momentum() + fiducial_lepton[1].momentum()).pT(); ///////////////////////////////////////////////////////////////////////// // JETS // -"alljets": found by "jetpro" projection && pT()>25 GeV && |y|<4.5 // -"vetojets": "alljets" && dR(electron,jet)>0.3 // ///////////////////////////////////////////////////////////////////////// Jets alljets, vetojets; for (const Jet& j : apply(e, "jet").jetsByPt(25)) { if (j.absrap() > 4.5 ) continue; alljets.push_back(j); bool deltaRcontrol = true; for (DressedLepton& fl : fiducial_lepton) { if (fl.constituentLepton().abspid() == PID::ELECTRON) { //electrons double deltaRjets = deltaR(fl.constituentLepton().momentum(), j.momentum(), RAPIDITY); if (deltaRjets <= 0.3) deltaRcontrol = false; //false if at least one electron is in the overlap region } } if (deltaRcontrol) vetojets.push_back(j); } ///////////////////////////////////////////////////////////////////////////////////////////////// // MISSING ETrel // -"mismom": fourvector of invisible momentum found by "met" projection // -"delta_phi": delta phi between mismom and the nearest "fiducial_lepton" or "vetojet" // -"MET_rel": missing transverse energy defined as: // *"mismom" for "delta_phi" >= (0.5*pi) // *"mismom.pT()*sin(delta_phi)" for "delta_phi" < (0.5*pi) // ///////////////////////////////////////////////////////////////////////////////////////////////// 
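      // Worked example of the MET_rel definition above (numbers purely illustrative):
      // for |pTmiss| = 60 GeV and the nearest lepton or veto jet at delta_phi = 0.3,
      // MET_rel = 60 * sin(0.3) ~= 17.7 GeV, whereas for delta_phi >= pi/2 the full
      // 60 GeV is kept (the code below caps delta_phi at pi/2, so sin(delta_phi) = 1).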
FourMomentum mismom; double MET_rel = 0, delta_phi = 0; mismom = -met.visibleMomentum(); vector vL_MET_angle, vJet_MET_angle; vL_MET_angle.push_back(fabs(deltaPhi(fiducial_lepton[0].momentum(), mismom))); vL_MET_angle.push_back(fabs(deltaPhi(fiducial_lepton[1].momentum(), mismom))); for (double& lM : vL_MET_angle) if (lM > M_PI) lM = 2*M_PI - lM; std::sort(vL_MET_angle.begin(), vL_MET_angle.end()); if (vetojets.size() == 0) delta_phi = vL_MET_angle[0]; if (vetojets.size() > 0) { for (Jet& vj : vetojets) { double jet_MET_angle = fabs(deltaPhi(vj.momentum(), mismom)); if (jet_MET_angle > M_PI) jet_MET_angle = 2*M_PI - jet_MET_angle; vJet_MET_angle.push_back(jet_MET_angle); } std::sort(vJet_MET_angle.begin(), vJet_MET_angle.end()); if (vL_MET_angle[0] <= vJet_MET_angle[0]) delta_phi = vL_MET_angle[0]; if (vL_MET_angle[0] > vJet_MET_angle[0]) delta_phi = vJet_MET_angle[0]; } if (delta_phi >= (0.5*M_PI)) delta_phi = 0.5*M_PI; MET_rel = mismom.pT()*sin(delta_phi); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // CUTS // -jetveto: event with at least one vetojet is vetoed // -M_Z: Z mass M_Z=91.1876*GeV // // * ee channel: MET_rel > 45 GeV, M_l1l2 > 15 GeV, |M_l1l2-M_Z| > 15 GeV, jetveto, pT_l1l2 > 30 GeV // * mumu channel: MET_rel > 45 GeV, M_l1l2 > 15 GeV, |M_l1l2-M_Z| > 15 GeV, jetveto, pT_l1l2 > 30 GeV // * emu channel: MET_rel > 25 GeV, M_l1l2 > 10 GeV, |M_l1l2-M_Z| > 0 GeV, jetveto, pT_l1l2 > 30 GeV // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ee channel if (fiducial_lepton[0].abspid() == PID::ELECTRON && fiducial_lepton[1].abspid() == PID::ELECTRON) { if (MET_rel <= 45*GeV) vetoEvent; if (M_l1l2 <= 15*GeV) vetoEvent; if (fabs(M_l1l2 - 91.1876*GeV) <= 15*GeV) vetoEvent; if (vetojets.size() != 0) vetoEvent; if (pT_l1l2 <= 30*GeV) vetoEvent; _h_Wl1_pT_ee->fill(sqrtS()*GeV); _h_Wl1_pT_inclusive->fill(pT_l1); } // mumu channel else if (fiducial_lepton[0].abspid() == PID::MUON && fiducial_lepton[1].abspid() == PID::MUON) { if (MET_rel <= 45*GeV) vetoEvent; if (M_l1l2 <= 15*GeV) vetoEvent; if (fabs(M_l1l2-91.1876*GeV) <= 15*GeV) vetoEvent; if (vetojets.size() != 0) vetoEvent; if (pT_l1l2 <= 30*GeV) vetoEvent; _h_Wl1_pT_mumu->fill(sqrtS()*GeV); _h_Wl1_pT_inclusive->fill(pT_l1); } // emu channel else if (fiducial_lepton[0].abspid() != fiducial_lepton[1].abspid()) { if (MET_rel <= 25*GeV) vetoEvent; if (M_l1l2 <= 10*GeV) vetoEvent; if (vetojets.size() != 0) vetoEvent; if (pT_l1l2 <= 30*GeV) vetoEvent; _h_Wl1_pT_emu->fill(sqrtS()*GeV); _h_Wl1_pT_inclusive->fill(pT_l1); } } /// Finalize void finalize() { const double norm = crossSection()/sumOfWeights()/femtobarn; scale(_h_Wl1_pT_ee, norm); scale(_h_Wl1_pT_mumu, norm); scale(_h_Wl1_pT_emu, norm); normalize(_h_Wl1_pT_inclusive, 1); } private: Histo1DPtr _h_Wl1_pT_ee, _h_Wl1_pT_mumu, _h_Wl1_pT_emu, _h_Wl1_pT_inclusive; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1190187); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1298811.cc b/analyses/pluginATLAS/ATLAS_2014_I1298811.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1298811.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1298811.cc @@ -1,196 +1,196 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2014_I1298811 : public Analysis { public: ATLAS_2014_I1298811() : Analysis("ATLAS_2014_I1298811") { } void init() { // Configure 
projections const FinalState fs(-4.8, 4.8, 0*MeV); declare(fs, "FS"); const FastJets jets(fs, FastJets::ANTIKT, 0.4); declare(jets, "Jets"); // Book histograms for (size_t itopo = 0; itopo < 2; ++itopo) { // Profiles for (size_t iregion = 0; iregion < 3; ++iregion) { book(_p_ptsumch_vs_ptlead[itopo][iregion] ,1+iregion, 1, itopo+1); book(_p_nch_vs_ptlead[itopo][iregion] ,4+iregion, 1, itopo+1); } book(_p_etsum25_vs_ptlead_trans[itopo] ,7, 1, itopo+1); book(_p_etsum48_vs_ptlead_trans[itopo] ,8, 1, itopo+1); book(_p_chratio_vs_ptlead_trans[itopo] ,9, 1, itopo+1); book(_p_ptmeanch_vs_ptlead_trans[itopo] ,10, 1, itopo+1); // 1D histos for (size_t iregion = 0; iregion < 3; ++iregion) { for (size_t ipt = 0; ipt < 4; ++ipt) { book(_h_ptsumch[ipt][itopo][iregion] ,13+3*ipt+iregion, 1, itopo+1); book(_h_nch[ipt][itopo][iregion] ,25+3*ipt+iregion, 1, itopo+1); } } } book(_p_ptmeanch_vs_nch_trans[0], 11, 1, 1); book(_p_ptmeanch_vs_nch_trans[1], 12, 1, 1); } void analyze(const Event& event) { // Find the jets with pT > 20 GeV and *rapidity* within 2.8 /// @todo Use Cuts instead rather than an eta cut in the proj and a y cut after const Jets alljets = apply(event, "Jets").jetsByPt(20*GeV); Jets jets; for (const Jet& j : alljets) if (j.absrap() < 2.8) jets.push_back(j); // Require at least one jet in the event if (jets.empty()) vetoEvent; // Identify the leading jet and its phi and pT const FourMomentum plead = jets[0].momentum(); const double philead = plead.phi(); const double etalead = plead.eta(); const double ptlead = plead.pT(); MSG_DEBUG("Leading object: pT = " << ptlead << ", eta = " << etalead << ", phi = " << philead); // Sum particle properties in the transverse regions int tmpnch[2] = {0,0}; double tmpptsum[2] = {0,0}; double tmpetsum48[2] = {0,0}; double tmpetsum25[2] = {0,0}; const Particles particles = apply(event, "FS").particles(); for (const Particle& p : particles) { // Only consider the transverse region(s), not toward or away if (!inRange(deltaPhi(p.phi(), philead), PI/3.0, TWOPI/3.0)) continue; // Work out which transverse side this particle is on const size_t iside = (mapAngleMPiToPi(p.phi() - philead) > 0) ? 0 : 1; MSG_TRACE(p.phi() << " vs. " << philead << ": " << iside); // Charged or neutral particle? 
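      // (Both threeCharge and its newer name charge3 return three times the electric
      //  charge as an integer, so quarks also map onto whole numbers: u -> +2, d -> -1,
      //  e- -> -3, proton -> +3.  A particle therefore counts as charged exactly when
      //  the returned value is non-zero.)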
- const bool charged = PID::threeCharge(p.pdgId()) != 0; + const bool charged = PID::charge3(p.pdgId()) != 0; // Track observables if (charged && fabs(p.eta()) < 2.5 && p.pT() > 500*MeV) { tmpnch[iside] += 1; tmpptsum[iside] += p.pT(); } // Cluster observables if ((charged && p.p3().mod() > 200*MeV) || (!charged && p.p3().mod() > 500*MeV)) { tmpetsum48[iside] += p.pT(); if (fabs(p.eta()) < 2.5) tmpetsum25[iside] += p.pT(); } } // Construct tot/max/min counts (for trans/max/min, indexed by iregion) const int nch[3] = { tmpnch[0] + tmpnch[1], std::max(tmpnch[0], tmpnch[1]), std::min(tmpnch[0], tmpnch[1]) }; const double ptsum[3] = { tmpptsum[0] + tmpptsum[1], std::max(tmpptsum[0], tmpptsum[1]), std::min(tmpptsum[0], tmpptsum[1]) }; const double etsum48[3] = { tmpetsum48[0] + tmpetsum48[1], std::max(tmpetsum48[0], tmpetsum48[1]), std::min(tmpetsum48[0], tmpetsum48[1]) }; const double etsum25[3] = { tmpetsum25[0] + tmpetsum25[1], std::max(tmpetsum25[0], tmpetsum25[1]), std::min(tmpetsum25[0], tmpetsum25[1]) }; ////////////////////////////////////////////////////////// // Now fill the histograms with the computed quantities // phi sizes of each trans/max/min region (for indexing by iregion) const double dphi[3] = { 2*PI/3.0, PI/3.0, PI/3.0 }; // Loop over inclusive jet and exclusive dijet configurations for (size_t itopo = 0; itopo < 2; ++itopo) { // Exit early if in the exclusive dijet iteration and the exclusive dijet cuts are not met if (itopo == 1) { if (jets.size() != 2) continue; const FourMomentum psublead = jets[1].momentum(); // Delta(phi) cut const double phisublead = psublead.phi(); if (deltaPhi(philead, phisublead) < 2.5) continue; // pT fraction cut const double ptsublead = psublead.pT(); if (ptsublead < 0.5*ptlead) continue; MSG_DEBUG("Exclusive dijet event"); } // Plot profiles and distributions which have no max/min region definition _p_etsum25_vs_ptlead_trans[itopo]->fill(ptlead/GeV, etsum25[0]/5.0/dphi[0]/GeV); _p_etsum48_vs_ptlead_trans[itopo]->fill(ptlead/GeV, etsum48[0]/9.6/dphi[0]/GeV); if (etsum25[0] > 0) { _p_chratio_vs_ptlead_trans[itopo]->fill(ptlead/GeV, ptsum[0]/etsum25[0]); } const double ptmean = safediv(ptsum[0], nch[0], -1); ///< Return -1 if div by zero if (ptmean >= 0) { _p_ptmeanch_vs_ptlead_trans[itopo]->fill(ptlead/GeV, ptmean/GeV); _p_ptmeanch_vs_nch_trans[itopo]->fill(nch[0], ptmean/GeV); } // Plot remaining profile and 1D observables, which are defined in all 3 tot/max/min regions for (size_t iregion = 0; iregion < 3; ++iregion) { _p_ptsumch_vs_ptlead[itopo][iregion]->fill(ptlead/GeV, ptsum[iregion]/5.0/dphi[iregion]/GeV); _p_nch_vs_ptlead[itopo][iregion]->fill(ptlead/GeV, nch[iregion]/5.0/dphi[iregion]); for (size_t ipt = 0; ipt < 4; ++ipt) { if (ipt == 1 && !inRange(ptlead/GeV, 20, 60)) continue; if (ipt == 2 && !inRange(ptlead/GeV, 60, 210)) continue; if (ipt == 3 && ptlead/GeV < 210) continue; _h_ptsumch[ipt][itopo][iregion]->fill(ptsum[iregion]/5.0/dphi[iregion]/GeV); _h_nch[ipt][itopo][iregion]->fill(nch[iregion]/5.0/dphi[iregion]); } } } } void finalize() { for (size_t iregion = 0; iregion < 3; ++iregion) { for (size_t itopo = 0; itopo < 2; ++itopo) { for (size_t ipt = 0; ipt < 4; ++ipt) { normalize(_h_ptsumch[ipt][itopo][iregion], 1.0); normalize(_h_nch[ipt][itopo][iregion], 1.0); } } } } private: /// @name Histogram arrays //@{ Profile1DPtr _p_ptsumch_vs_ptlead[2][3]; Profile1DPtr _p_nch_vs_ptlead[2][3]; Profile1DPtr _p_ptmeanch_vs_ptlead_trans[2]; Profile1DPtr _p_etsum25_vs_ptlead_trans[2]; Profile1DPtr _p_etsum48_vs_ptlead_trans[2]; 
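    // Index conventions for the arrays below, as booked and filled above:
    //  itopo   (size 2): 0 = inclusive jet topology, 1 = exclusive dijet topology
    //  iregion (size 3): 0 = both transverse sides summed, 1 = trans-max side, 2 = trans-min side
    //  ipt     (size 4): leading-jet pT selection: 0 = all, 1 = 20-60 GeV, 2 = 60-210 GeV, 3 = above 210 GeV
    //  The profiles are indexed [itopo][iregion]; the 1D histos are [ipt][itopo][iregion].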
Profile1DPtr _p_chratio_vs_ptlead_trans[2]; Profile1DPtr _p_ptmeanch_vs_nch_trans[2]; Histo1DPtr _h_ptsumch[4][2][3]; Histo1DPtr _h_nch[4][2][3]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1298811); } diff --git a/analyses/pluginATLAS/ATLAS_2014_I1306615.cc b/analyses/pluginATLAS/ATLAS_2014_I1306615.cc --- a/analyses/pluginATLAS/ATLAS_2014_I1306615.cc +++ b/analyses/pluginATLAS/ATLAS_2014_I1306615.cc @@ -1,484 +1,484 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief ATLAS H->yy differential cross-sections measurement /// /// @author Michaela Queitsch-Maitland // // arXiv: http://arxiv.org/abs/ARXIV:1407.4222 // HepData: http://hepdata.cedar.ac.uk/view/ins1306615 class ATLAS_2014_I1306615 : public Analysis { public: // Constructor ATLAS_2014_I1306615() : Analysis("ATLAS_2014_I1306615") { } // Book histograms and initialise projections before the run void init() { // Final state // All particles within |eta| < 5.0 const FinalState FS(Cuts::abseta<5.0); declare(FS,"FS"); // Project photons with pT > 25 GeV and |eta| < 2.37 IdentifiedFinalState ph_FS(Cuts::abseta<2.37 && Cuts::pT>25.0*GeV); ph_FS.acceptIdPair(PID::PHOTON); declare(ph_FS, "PH_FS"); // Project photons for dressing IdentifiedFinalState ph_dressing_FS(FS); ph_dressing_FS.acceptIdPair(PID::PHOTON); // Project bare electrons IdentifiedFinalState el_bare_FS(FS); el_bare_FS.acceptIdPair(PID::ELECTRON); declare(el_bare_FS,"el_bare_FS"); // Project dressed electrons with pT > 15 GeV and |eta| < 2.47 DressedLeptons el_dressed_FS(ph_dressing_FS, el_bare_FS, 0.1, Cuts::abseta < 2.47 && Cuts::pT > 15*GeV); declare(el_dressed_FS,"EL_DRESSED_FS"); // Project bare muons IdentifiedFinalState mu_bare_FS(FS); mu_bare_FS.acceptIdPair(PID::MUON); // Project dressed muons with pT > 15 GeV and |eta| < 2.47 //DressedLeptons mu_dressed_FS(ph_dressing_FS, mu_bare_FS, 0.1, true, -2.47, 2.47, 15.0*GeV, false); DressedLeptons mu_dressed_FS(ph_dressing_FS, mu_bare_FS, 0.1, Cuts::abseta < 2.47 && Cuts::pT > 15*GeV); declare(mu_dressed_FS,"MU_DRESSED_FS"); // Final state excluding muons and neutrinos (for jet building and photon isolation) VetoedFinalState veto_mu_nu_FS(FS); veto_mu_nu_FS.vetoNeutrinos(); veto_mu_nu_FS.addVetoPairId(PID::MUON); declare(veto_mu_nu_FS, "VETO_MU_NU_FS"); // Build the anti-kT R=0.4 jets, using FinalState particles (vetoing muons and neutrinos) FastJets jets(veto_mu_nu_FS, FastJets::ANTIKT, 0.4); declare(jets, "JETS"); // Book histograms // 1D distributions book(_h_pT_yy ,1,1,1); book(_h_y_yy ,2,1,1); book(_h_Njets30 ,3,1,1); book(_h_Njets50 ,4,1,1); book(_h_pT_j1 ,5,1,1); book(_h_y_j1 ,6,1,1); book(_h_HT ,7,1,1); book(_h_pT_j2 ,8,1,1); book(_h_Dy_jj ,9,1,1); book(_h_Dphi_yy_jj ,10,1,1); book(_h_cosTS_CS ,11,1,1); book(_h_cosTS_CS_5bin ,12,1,1); book(_h_Dphi_jj ,13,1,1); book(_h_pTt_yy ,14,1,1); book(_h_Dy_yy ,15,1,1); book(_h_tau_jet ,16,1,1); book(_h_sum_tau_jet ,17,1,1); book(_h_y_j2 ,18,1,1); book(_h_pT_j3 ,19,1,1); book(_h_m_jj ,20,1,1); book(_h_pT_yy_jj ,21,1,1); // 2D distributions of cosTS_CS x pT_yy book(_h_cosTS_pTyy_low ,22,1,1); book(_h_cosTS_pTyy_high ,22,1,2); book(_h_cosTS_pTyy_rest ,22,1,3); // 2D distributions of Njets x pT_yy book(_h_pTyy_Njets0 ,23,1,1); book(_h_pTyy_Njets1 ,23,1,2); book(_h_pTyy_Njets2 ,23,1,3); book(_h_pTj1_excl ,24,1,1); // Fiducial regions book(_h_fidXSecs ,29,1,1); } // Perform the per-event 
analysis void analyze(const Event& event) { // Get final state particles const ParticleVector& FS_ptcls = apply(event, "FS").particles(); const ParticleVector& ptcls_veto_mu_nu = apply(event, "VETO_MU_NU_FS").particles(); const ParticleVector& photons = apply(event, "PH_FS").particlesByPt(); const vector& el_dressed = apply(event, "EL_DRESSED_FS").dressedLeptons(); const vector& mu_dressed = apply(event, "MU_DRESSED_FS").dressedLeptons(); // For isolation calculation float dR_iso = 0.4; float ETcut_iso = 14.0; FourMomentum ET_iso; // Fiducial selection: pT > 25 GeV, |eta| < 2.37 and isolation (in cone deltaR = 0.4) is < 14 GeV vector fid_photons; for (const Particle& ph : photons) { // Veto photons from hadron or tau decay if ( fromHadronDecay(ph) ) continue; // Calculate isolation ET_iso = - ph.momentum(); // Loop over fs truth particles (excluding muons and neutrinos) for (const Particle& p : ptcls_veto_mu_nu) { // Check if the truth particle is in a cone of 0.4 if ( deltaR(ph.momentum(), p.momentum()) < dR_iso ) ET_iso += p.momentum(); } // Check isolation if ( ET_iso.Et() > ETcut_iso ) continue; // Fill vector of photons passing fiducial selection fid_photons.push_back(&ph); } if(fid_photons.size() < 2) vetoEvent; const FourMomentum& y1 = fid_photons[0]->momentum(); const FourMomentum& y2 = fid_photons[1]->momentum(); double m_yy = (y1 + y2).mass(); // Relative pT cuts if ( y1.pT() < 0.35 * m_yy || y2.pT() < 0.25 * m_yy ) vetoEvent; // Mass window cut if ( m_yy < 105 || m_yy > 160 ) vetoEvent; // -------------------------------------------- // // Passed diphoton baseline fiducial selection! // // -------------------------------------------- // // Electron selection vector good_el; for(const DressedLepton& els : el_dressed) { const Particle& el = els.constituentLepton(); if ( el.momentum().pT() < 15 ) continue; if ( fabs(el.momentum().eta()) > 2.47 ) continue; if ( deltaR(el.momentum(), y1) < 0.4 ) continue; if ( deltaR(el.momentum(), y2) < 0.4 ) continue; if ( fromHadronDecay(el) ) continue; // Veto electrons from hadron or tau decay good_el.push_back(&el); } // Muon selection vector good_mu; for(const DressedLepton& mus : mu_dressed) { const Particle& mu = mus.constituentLepton(); if ( mu.momentum().pT() < 15 ) continue; if ( fabs(mu.momentum().eta()) > 2.47 ) continue; if ( deltaR(mu.momentum(), y1) < 0.4 ) continue; if ( deltaR(mu.momentum(), y2) < 0.4 ) continue; if ( fromHadronDecay(mu) ) continue; // Veto muons from hadron or tau decay good_mu.push_back(&mu); } // Find prompt, invisible particles for missing ET calculation // Based on VisibleFinalState projection FourMomentum invisible(0,0,0,0); for (const Particle& p : FS_ptcls) { // Veto non-prompt particles (from hadron or tau decay) if ( fromHadronDecay(p) ) continue; // Charged particles are visible - if ( PID::threeCharge( p.pid() ) != 0 ) continue; + if ( PID::charge3( p.pid() ) != 0 ) continue; // Neutral hadrons are visible if ( PID::isHadron( p.pid() ) ) continue; // Photons are visible if ( p.pid() == PID::PHOTON ) continue; // Gluons are visible (for parton level analyses) if ( p.pid() == PID::GLUON ) continue; // Everything else is invisible invisible += p.momentum(); } double MET = invisible.Et(); // Jet selection // Get jets with pT > 25 GeV and |rapidity| < 4.4 //const Jets& jets = apply(event, "JETS").jetsByPt(25.0*GeV, MAXDOUBLE, -4.4, 4.4, RAPIDITY); const Jets& jets = apply(event, "JETS").jetsByPt(Cuts::pT>25.0*GeV && Cuts::absrap <4.4); vector jets_25; vector jets_30; vector jets_50; for (const Jet& jet : 
jets) { bool passOverlap = true; // Overlap with leading photons if ( deltaR(y1, jet.momentum()) < 0.4 ) passOverlap = false; if ( deltaR(y2, jet.momentum()) < 0.4 ) passOverlap = false; // Overlap with good electrons for (const Particle* el : good_el) if ( deltaR(el->momentum(), jet.momentum()) < 0.2 ) passOverlap = false; if ( ! passOverlap ) continue; if ( fabs(jet.momentum().eta()) < 2.4 || ( fabs(jet.momentum().eta()) > 2.4 && jet.momentum().pT() > 30 ) ) jets_25.push_back(&jet); if ( jet.momentum().pT() > 30 ) jets_30.push_back(&jet); if ( jet.momentum().pT() > 50 ) jets_50.push_back(&jet); } // Fiducial regions _h_fidXSecs->fill(1); if ( jets_30.size() >= 1 ) _h_fidXSecs->fill(2); if ( jets_30.size() >= 2 ) _h_fidXSecs->fill(3); if ( jets_30.size() >= 3 ) _h_fidXSecs->fill(4); if ( jets_30.size() >= 2 && passVBFCuts(y1 + y2, jets_30.at(0)->momentum(), jets_30.at(1)->momentum()) ) _h_fidXSecs->fill(5); if ( (good_el.size() + good_mu.size()) > 0 ) _h_fidXSecs->fill(6); if ( MET > 80 ) _h_fidXSecs->fill(7); // Fill histograms // Inclusive variables _pT_yy = (y1 + y2).pT(); _y_yy = fabs( (y1 + y2).rapidity() ); _cosTS_CS = cosTS_CS(y1, y2); _pTt_yy = pTt(y1, y2); _Dy_yy = fabs( deltaRap(y1, y2) ); _Njets30 = jets_30.size() > 3 ? 3 : jets_30.size(); _Njets50 = jets_50.size() > 3 ? 3 : jets_50.size(); _h_Njets30->fill(_Njets30); _h_Njets50->fill(_Njets50); _pT_j1 = jets_30.size() > 0 ? jets_30.at(0)->momentum().pT() : 0.; _pT_j2 = jets_30.size() > 1 ? jets_30.at(1)->momentum().pT() : 0.; _pT_j3 = jets_30.size() > 2 ? jets_30.at(2)->momentum().pT() : 0.; _HT = 0.0; for (const Jet* jet : jets_30) _HT += jet->momentum().pT(); _tau_jet = tau_jet_max(y1 + y2, jets_25); _sum_tau_jet = sum_tau_jet(y1 + y2, jets_25); _h_pT_yy ->fill(_pT_yy); _h_y_yy ->fill(_y_yy); _h_pT_j1 ->fill(_pT_j1); _h_cosTS_CS ->fill(_cosTS_CS); _h_cosTS_CS_5bin->fill(_cosTS_CS); _h_HT ->fill(_HT); _h_pTt_yy ->fill(_pTt_yy); _h_Dy_yy ->fill(_Dy_yy); _h_tau_jet ->fill(_tau_jet); _h_sum_tau_jet ->fill(_sum_tau_jet); // >=1 jet variables if ( jets_30.size() >= 1 ) { FourMomentum j1 = jets_30[0]->momentum(); _y_j1 = fabs( j1.rapidity() ); _h_pT_j2->fill(_pT_j2); _h_y_j1 ->fill(_y_j1); } // >=2 jet variables if ( jets_30.size() >= 2 ) { FourMomentum j1 = jets_30[0]->momentum(); FourMomentum j2 = jets_30[1]->momentum(); _Dy_jj = fabs( deltaRap(j1, j2) ); _Dphi_jj = fabs( deltaPhi(j1, j2) ); _Dphi_yy_jj = fabs( deltaPhi(y1 + y2, j1 + j2) ); _m_jj = (j1 + j2).mass(); _pT_yy_jj = (y1 + y2 + j1 + j2).pT(); _y_j2 = fabs( j2.rapidity() ); _h_Dy_jj ->fill(_Dy_jj); _h_Dphi_jj ->fill(_Dphi_jj); _h_Dphi_yy_jj ->fill(_Dphi_yy_jj); _h_m_jj ->fill(_m_jj); _h_pT_yy_jj ->fill(_pT_yy_jj); _h_pT_j3 ->fill(_pT_j3); _h_y_j2 ->fill(_y_j2); } // 2D distributions of cosTS_CS x pT_yy if ( _pT_yy < 80 ) _h_cosTS_pTyy_low->fill(_cosTS_CS); else if ( _pT_yy > 80 && _pT_yy < 200 ) _h_cosTS_pTyy_high->fill(_cosTS_CS); else if ( _pT_yy > 200 ) _h_cosTS_pTyy_rest->fill(_cosTS_CS); // 2D distributions of pT_yy x Njets if ( _Njets30 == 0 ) _h_pTyy_Njets0->fill(_pT_yy); else if ( _Njets30 == 1 ) _h_pTyy_Njets1->fill(_pT_yy); else if ( _Njets30 >= 2 ) _h_pTyy_Njets2->fill(_pT_yy); if ( _Njets30 == 1 ) _h_pTj1_excl->fill(_pT_j1); } // Normalise histograms after the run void finalize() { const double xs = crossSectionPerEvent()/femtobarn; scale(_h_pT_yy, xs); scale(_h_y_yy, xs); scale(_h_pT_j1, xs); scale(_h_y_j1, xs); scale(_h_HT, xs); scale(_h_pT_j2, xs); scale(_h_Dy_jj, xs); scale(_h_Dphi_yy_jj, xs); scale(_h_cosTS_CS, xs); scale(_h_cosTS_CS_5bin, xs); 
scale(_h_Dphi_jj, xs); scale(_h_pTt_yy, xs); scale(_h_Dy_yy, xs); scale(_h_tau_jet, xs); scale(_h_sum_tau_jet, xs); scale(_h_y_j2, xs); scale(_h_pT_j3, xs); scale(_h_m_jj, xs); scale(_h_pT_yy_jj, xs); scale(_h_cosTS_pTyy_low, xs); scale(_h_cosTS_pTyy_high, xs); scale(_h_cosTS_pTyy_rest, xs); scale(_h_pTyy_Njets0, xs); scale(_h_pTyy_Njets1, xs); scale(_h_pTyy_Njets2, xs); scale(_h_pTj1_excl, xs); scale(_h_Njets30, xs); scale(_h_Njets50, xs); scale(_h_fidXSecs, xs); } // Trace event record to see if particle came from a hadron (or a tau from a hadron decay) // Based on fromDecay() function bool fromHadronDecay(const Particle& p ) { const GenParticle * gp = p.genParticle(); if (!gp) return false; /// TODO: something weird to make this necessary const GenVertex* prodVtx = p.genParticle()->production_vertex(); if (prodVtx == NULL) return false; for (const GenParticle* ancestor : particles(prodVtx, HepMC::ancestors)) { const PdgId pid = ancestor->pdg_id(); if (ancestor->status() == 2 && PID::isHadron(pid)) return true; if (ancestor->status() == 2 && (abs(pid) == PID::TAU && fromHadronDecay(ancestor))) return true; } return false; } // VBF-enhanced dijet topology selection cuts bool passVBFCuts(const FourMomentum &H, const FourMomentum &j1, const FourMomentum &j2) { return ( fabs(deltaRap(j1, j2)) > 2.8 && (j1 + j2).mass() > 400 && fabs(deltaPhi(H, j1 + j2)) > 2.6 ); } // Cosine of the decay angle in the Collins-Soper frame double cosTS_CS(const FourMomentum &y1, const FourMomentum &y2) { return fabs( ( (y1.E() + y1.pz())* (y2.E() - y2.pz()) - (y1.E() - y1.pz()) * (y2.E() + y2.pz()) ) / ((y1 + y2).mass() * sqrt(pow((y1 + y2).mass(), 2) + pow((y1 + y2).pt(), 2)) ) ); } // Diphoton pT along thrust axis double pTt(const FourMomentum &y1, const FourMomentum &y2) { return fabs(y1.px() * y2.py() - y2.px() * y1.py()) / (y1 - y2).pT()*2; } // Tau of jet (see paper for description) // tau_jet = mT/(2*cosh(y*)), where mT = pT (+) m, and y* = rapidty in Higgs rest frame double tau_jet( const FourMomentum &H, const FourMomentum &jet ) { return sqrt( pow(jet.pT(),2) + pow(jet.mass(),2) ) / (2.0 * cosh( jet.rapidity() - H.rapidity() ) ); } // Maximal (leading) tau_jet (see paper for description) double tau_jet_max( const FourMomentum &H, const vector jets, double tau_jet_cut = 8. ) { double max_tj = 0; for (size_t i=0; i < jets.size(); ++i) { FourMomentum jet = jets[i]->momentum(); if ( tau_jet(H, jet) > tau_jet_cut ) max_tj = max( tau_jet(H, jet), max_tj ); } return max_tj; } // Scalar sum of tau for all jets (see paper for description) double sum_tau_jet( const FourMomentum &H, const vector jets, double tau_jet_cut = 8. 
) { double sum_tj = 0; for (size_t i=0; i < jets.size(); ++i) { FourMomentum jet = jets[i]->momentum(); if ( tau_jet(H, jet) > tau_jet_cut ) sum_tj += tau_jet(H, jet); } return sum_tj; } private: Histo1DPtr _h_pT_yy; Histo1DPtr _h_y_yy; Histo1DPtr _h_Njets30; Histo1DPtr _h_Njets50; Histo1DPtr _h_pT_j1; Histo1DPtr _h_y_j1; Histo1DPtr _h_HT; Histo1DPtr _h_pT_j2; Histo1DPtr _h_Dy_jj; Histo1DPtr _h_Dphi_yy_jj; Histo1DPtr _h_cosTS_CS; Histo1DPtr _h_cosTS_CS_5bin; Histo1DPtr _h_Dphi_jj; Histo1DPtr _h_pTt_yy; Histo1DPtr _h_Dy_yy; Histo1DPtr _h_tau_jet; Histo1DPtr _h_sum_tau_jet; Histo1DPtr _h_y_j2; Histo1DPtr _h_pT_j3; Histo1DPtr _h_m_jj; Histo1DPtr _h_pT_yy_jj; Histo1DPtr _h_cosTS_pTyy_low; Histo1DPtr _h_cosTS_pTyy_high; Histo1DPtr _h_cosTS_pTyy_rest; Histo1DPtr _h_pTyy_Njets0; Histo1DPtr _h_pTyy_Njets1; Histo1DPtr _h_pTyy_Njets2; Histo1DPtr _h_pTj1_excl; Histo1DPtr _h_fidXSecs; int _Njets30; int _Njets50; double _pT_yy; double _y_yy; double _cosTS_CS; double _pT_j1; double _m_jj; double _y_j1; double _HT; double _pT_j2; double _y_j2; double _Dphi_yy_jj; double _pT_yy_jj; double _Dphi_jj; double _Dy_jj; double _pT_j3; double _pTt_yy; double _Dy_yy; double _tau_jet; double _sum_tau_jet; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2014_I1306615); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1351916.cc b/analyses/pluginATLAS/ATLAS_2015_I1351916.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1351916.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1351916.cc @@ -1,150 +1,150 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" namespace Rivet { class ATLAS_2015_I1351916 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructors ATLAS_2015_I1351916(string name="ATLAS_2015_I1351916", size_t mode=0) : Analysis(name), _mode(mode) // pick electron channel by default { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { const FinalState fs; IdentifiedFinalState bareleptons(fs); bareleptons.acceptIdPair(_mode? PID::MUON : PID::ELECTRON); const Cut cuts = (_mode == 0) ? (Cuts::pT > 25*GeV && Cuts::abseta < 4.9) : (Cuts::pT > 20*GeV && Cuts::abseta < 2.47); DressedLeptons leptons(fs, bareleptons, 0.1, cuts, true); declare(leptons, "leptons"); // Book dummy histograms for heterogeneous merging /// @todo AB: Don't we have a nicer way to book dummy/tmp histos from ref? 
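// Booking scheme: the mll spectrum is booked separately for cosTheta* > 0 ("_pos") and cosTheta* < 0 ("_neg"),
// for the central-central ("NCC") configuration and, in the electron mode only, for central-forward ("NCF").
// finalize() then forms the forward-backward asymmetry in each mll bin,
//   A_FB = (N_pos - N_neg) / (N_pos + N_neg),
// via divide() into the pre-booked asymmetry scatters.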
string label = "NCC"; string hname = "d01-x01-y01"; const Scatter2D& ref = refData(hname); hname = "d01-x01-y02"; book(_h[label + "_pos"] ,hname, ref); hname = "d01-x01-y03"; book(_h[label + "_neg"] ,hname, ref); if (_mode == 0) { label = "NCF"; hname = "d01-x02-y01"; const Scatter2D& ref_cf = refData(hname); hname = "d01-x02-y02"; book(_h[label + "_pos"] ,hname, ref_cf); hname = "d01-x02-y03"; book(_h[label + "_neg"] ,hname, ref_cf); } // Book asymmetry scatter plots book(_s["CC"], 1, 1, 1, true); if (_mode == 0) book(_s["CF"], 1, 2, 1, true); } /// Perform the per-event analysis void analyze(const Event& e) { // Get and cut on dressed leptons const vector& leptons = apply(e, "leptons").dressedLeptons(); if (leptons.size() != 2) vetoEvent; // require exactly two leptons - if (leptons[0].threeCharge() * leptons[1].threeCharge() > 0) vetoEvent; // require opposite charge + if (leptons[0].charge3() * leptons[1].charge3() > 0) vetoEvent; // require opposite charge // Identify lepton vs antilepton - const Particle& lpos = leptons[(leptons[0].threeCharge() > 0) ? 0 : 1]; - const Particle& lneg = leptons[(leptons[0].threeCharge() < 0) ? 0 : 1]; + const Particle& lpos = leptons[(leptons[0].charge3() > 0) ? 0 : 1]; + const Particle& lneg = leptons[(leptons[0].charge3() < 0) ? 0 : 1]; string label = "N"; if (_mode == 1) {// electron channel label += "CC"; // only central-central for muons } else { // electron channel const double eta1 = lpos.abseta(); const double eta2 = lneg.abseta(); if ( (eta1 < 2.47 && inRange(eta2, 2.5, 4.9)) || (eta2 < 2.47 && inRange(eta1, 2.5, 4.9)) ) label += "CF"; // central-forward else if (eta1 < 2.47 && eta2 < 2.47) label += "CC"; // central-central else vetoEvent; // ain't no forward-forward } const double cosThetaStar = cosCollinsSoper(lneg, lpos); const double mll = (lpos.mom() + lneg.mom()).mass(); label += cosThetaStar < 0.0? "_neg" : "_pos"; _h[label]->fill(mll/GeV); } /// Normalise histograms etc., after the run void finalize() { const double sf = crossSectionPerEvent() / picobarn; for (const auto& key_hist : _h) scale(key_hist.second, sf); divide(*_h["NCC_pos"] - *_h["NCC_neg"], *_h["NCC_pos"] + *_h["NCC_neg"], _s["CC"]); if (!_mode) divide(*_h["NCF_pos"] - *_h["NCF_neg"], *_h["NCF_pos"] + *_h["NCF_neg"], _s["CF"]); } // Cosine of the decay angle in the Collins-Soper frame double cosCollinsSoper(const FourMomentum& l1, const FourMomentum& l2) { const FourMomentum ll = l1 + l2; const double nom = (l1.E() + l1.pz()) * (l2.E() - l2.pz()) - (l1.E() - l1.pz()) * (l2.E() + l2.pz()); const double denom = ll.mass() * sqrt( sqr(ll.mass()) + sqr(ll.pt()) ); return sign(ll.pz()) * safediv(nom, denom); // protect against division by zero, you never know... 
} //@} protected: /// Electron or muon mode = 0 or 1, for use by derived _EL, _MU analysis classes size_t _mode; private: /// Histograms map _h; /// Asymmetries map _s; }; class ATLAS_2015_I1351916_EL : public ATLAS_2015_I1351916 { public: ATLAS_2015_I1351916_EL() : ATLAS_2015_I1351916("ATLAS_2015_I1351916_EL", 0) { } }; class ATLAS_2015_I1351916_MU : public ATLAS_2015_I1351916 { public: ATLAS_2015_I1351916_MU() : ATLAS_2015_I1351916("ATLAS_2015_I1351916_MU", 1) { } }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1351916); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1351916_EL); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1351916_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1376945.cc b/analyses/pluginATLAS/ATLAS_2015_I1376945.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1376945.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1376945.cc @@ -1,221 +1,221 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief Colour flow in hadronic top decay at 8 TeV class ATLAS_2015_I1376945 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1376945); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { const FinalState fs; PromptFinalState promptFs(fs); promptFs.acceptTauDecays(true); promptFs.acceptMuonDecays(false); IdentifiedFinalState neutrino_fs(promptFs); neutrino_fs.acceptNeutrinos(); declare(neutrino_fs, "NEUTRINO_FS"); IdentifiedFinalState Photon(fs); Photon.acceptIdPair(PID::PHOTON); IdentifiedFinalState bare_muons_fs(promptFs); bare_muons_fs.acceptIdPair(PID::MUON); IdentifiedFinalState bare_elecs_fs(promptFs); bare_elecs_fs.acceptIdPair(PID::ELECTRON); Cut lep_cuts = (Cuts::abseta < 2.5) & (Cuts::pT > 1*MeV); DressedLeptons muons(Photon, bare_muons_fs, 0.1, lep_cuts); declare(muons, "MUONS"); DressedLeptons elecs(Photon, bare_elecs_fs, 0.1, lep_cuts); declare(elecs, "ELECS"); VetoedFinalState vfs; vfs.addVetoOnThisFinalState(muons); vfs.addVetoOnThisFinalState(elecs); vfs.addVetoOnThisFinalState(neutrino_fs); FastJets fjets(vfs, FastJets::ANTIKT, 0.4); fjets.useInvisibles(); declare(fjets, "jets"); book(h_pull_all ,4,1,1); book(h_pull_charged ,5,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { /************** * JETS * **************/ const Jets& allJets = apply(event, "jets").jetsByPt(Cuts::pT > 25.0*GeV && Cuts::absrap < 2.5); const vector& all_elecs = apply(event, "ELECS").dressedLeptons(); const vector& all_muons = apply(event, "MUONS").dressedLeptons(); Jets goodJets; for (const Jet j : allJets) { bool keep = true; for (const DressedLepton el : all_elecs) keep &= deltaR(j, el) >= 0.2; if (keep) goodJets += j; } if ( goodJets.size() < 4 ) vetoEvent; /**************** * LEPTONS * ****************/ vector muons, vetoMuons; for (const DressedLepton mu : all_muons) { bool keep = true; for (const Jet j : goodJets) keep &= deltaR(j, mu) >= 0.4; if (keep && mu.pt() > 15*GeV) { vetoMuons.push_back(mu); if (mu.pt() > 25*GeV) muons.push_back(mu); } } vector elecs, vetoElecs; for (const DressedLepton el : all_elecs) { bool keep = true; for (const Jet j : goodJets) keep &= deltaR(j, el) >= 0.4; if (keep && el.pt() > 15*GeV) { vetoElecs.push_back(el); if (el.pt() > 25*GeV) elecs.push_back(el); } } if (muons.empty() && 
elecs.empty()) vetoEvent; bool muCandidate = !( muons.size() < 1 || vetoMuons.size() > 1 || vetoElecs.size() > 0 ); bool elCandidate = !( elecs.size() < 1 || vetoElecs.size() > 1 || vetoMuons.size() > 0 ); if (!elCandidate && !muCandidate) vetoEvent; /****************************** * ELECTRON-MUON OVERLAP * ******************************/ for (const DressedLepton electron : elecs) { for (const DressedLepton muon : muons) { double d_theta = fabs(muon.theta() - electron.theta()); double d_phi = deltaPhi(muon.phi(), electron.phi()); if (d_theta < 0.005 && d_phi < 0.005) vetoEvent; } } /**************** * NEUTRINOS * ****************/ const Particles& neutrinos = apply(event, "NEUTRINO_FS").particlesByPt(); FourMomentum metVector = FourMomentum(0.,0.,0.,0.); for (const Particle& n : neutrinos) { metVector += n.momentum(); } double met = metVector.pt(); if (met <= 20*GeV) vetoEvent; if ( (_mT(muCandidate? muons[0] : elecs[0], metVector) + met) <= 60. ) vetoEvent; /**************** * B-JETS * ****************/ Jets bJets, wJets; for(Jet j : goodJets) { bool b_tagged = false; Particles bTags = j.bTags(); for ( Particle b : bTags ) { b_tagged |= b.pT() > 5*GeV; } if (b_tagged) bJets += j; if (!b_tagged && j.abseta() < 2.1) wJets += j; } if ( bJets.size() < 2 || wJets.size() < 2 ) vetoEvent; double pull_angle = fabs(CalculatePullAngle(wJets[0], wJets[1], 0)); h_pull_all->fill(pull_angle / Rivet::PI); double pull_angle_charged = fabs(CalculatePullAngle(wJets[0], wJets[1], 1)); h_pull_charged->fill(pull_angle_charged / Rivet::PI); } Vector3 CalculatePull(Jet& jet, bool &isCharged) { Vector3 pull(0.0, 0.0, 0.0); double PT = jet.pT(); Particles& constituents = jet.particles(); Particles charged_constituents; if (isCharged) { for (Particle p : constituents) { - if (p.threeCharge() != 0) charged_constituents += p; + if (p.charge3() != 0) charged_constituents += p; } constituents = charged_constituents; } // calculate axis FourMomentum axis; for (Particle p : constituents) axis += p.momentum(); Vector3 J(axis.rap(), axis.phi(MINUSPI_PLUSPI), 0.0); // calculate pull for (Particle p : constituents) { Vector3 ri = Vector3(p.rap(), p.phi(MINUSPI_PLUSPI), 0.0) - J; while (ri.y() > Rivet::PI) ri.setY(ri.y() - Rivet::TWOPI); while (ri.y() < -Rivet::PI) ri.setY(ri.y() + Rivet::TWOPI); pull.setX(pull.x() + (ri.mod() * ri.x() * p.pT()) / PT); pull.setY(pull.y() + (ri.mod() * ri.y() * p.pT()) / PT); } return pull; } double CalculatePullAngle(Jet& jet1, Jet& axisjet, bool isCharged) { Vector3 pull_vector = CalculatePull(jet1, isCharged); pull_vector = Vector3(1000.*pull_vector.x(), 1000.*pull_vector.y(), 0.); double drap = axisjet.rap() - jet1.rap(); double dphi = axisjet.phi(MINUSPI_PLUSPI) - jet1.phi(MINUSPI_PLUSPI); Vector3 j2_vector(drap, dphi, 0.0); return mapAngleMPiToPi(deltaPhi(pull_vector, j2_vector)); } double _mT(const FourMomentum &l, FourMomentum &nu) const { return sqrt( 2 * l.pT() * nu.pT() * (1 - cos(deltaPhi(l, nu))) ); } /// Normalise histograms etc., after the run void finalize() { normalize(h_pull_all); normalize(h_pull_charged); } //@} private: Histo1DPtr h_pull_all; Histo1DPtr h_pull_charged; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1376945); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1393758.cc b/analyses/pluginATLAS/ATLAS_2015_I1393758.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1393758.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1393758.cc @@ -1,141 +1,141 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2015_I1393758 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1393758); public: void init() { declare(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); book(forward_kappa3 ,1, 1, 1); book(forward_kappa5 ,2, 1, 1); book(forward_kappa7 ,3, 1, 1); book(central_kappa3 ,4, 1, 1); book(central_kappa5 ,5, 1, 1); book(central_kappa7 ,6, 1, 1); book(forwardRMS_kappa3, "d07-x01-y01", true); book(forwardRMS_kappa5, "d08-x01-y01", true); book(forwardRMS_kappa7, "d09-x01-y01", true); book(centralRMS_kappa3, "d10-x01-y01", true); book(centralRMS_kappa5, "d11-x01-y01", true); book(centralRMS_kappa7, "d12-x01-y01", true); } /// Perform the per-event analysis void analyze(const Event& event) { Jets m_goodJets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.1); if (m_goodJets.size() < 2) vetoEvent; if (m_goodJets[0].pT() < 50*GeV) vetoEvent; if (m_goodJets[1].pT() < 50*GeV) vetoEvent; if (fabs(1.0 - m_goodJets[0].pT()/m_goodJets[1].pT()) > 0.5) vetoEvent; bool check = m_goodJets[0].abseta() < m_goodJets[1].abseta(); int pos_f = int(check); int pos_c = int(!check); double kappa3_f = CalculateJetCharge(m_goodJets[pos_f], 0.3, 0.5, 1.8); double kappa5_f = CalculateJetCharge(m_goodJets[pos_f], 0.5, 0.5, 1.2); double kappa7_f = CalculateJetCharge(m_goodJets[pos_f], 0.7, 0.5, 0.9); double pT_f = m_goodJets[pos_f].pT(); double kappa3_c = CalculateJetCharge(m_goodJets[pos_c], 0.3, 0.5, 1.8); double kappa5_c = CalculateJetCharge(m_goodJets[pos_c], 0.5, 0.5, 1.2); double kappa7_c = CalculateJetCharge(m_goodJets[pos_c], 0.7, 0.5, 0.9); double pT_c = m_goodJets[pos_c].pT(); forward_kappa3->fill(pT_f, kappa3_f); forward_kappa5->fill(pT_f, kappa5_f); forward_kappa7->fill(pT_f, kappa7_f); central_kappa3->fill(pT_c, kappa3_c); central_kappa5->fill(pT_c, kappa5_c); central_kappa7->fill(pT_c, kappa7_c); } double CalculateJetCharge(Jet& jet, double kappa=0.5, double pTcut=0.5, double Qmax=1.2) { double PTkap = pow(jet.momentum().pT(),kappa); double jetcharge = 0.; for (const Particle& p : jet.particles()) { if (p.pT() < pTcut) continue; - if (p.threeCharge()) jetcharge += (p.threeCharge()/3.)*pow(p.pT(),kappa)/PTkap; + if (p.charge3()) jetcharge += (p.charge3()/3.)*pow(p.pT(),kappa)/PTkap; } //Overflow and underflow if (jetcharge > Qmax) jetcharge = Qmax*0.9999; if (jetcharge < -Qmax) jetcharge = -Qmax*0.9999; return jetcharge; } /// Normalise histograms etc., after the run void finalize() { if (numEvents() > 2) { for (unsigned int i = 0; i < forward_kappa3->numBins(); ++i) { double stdv_fkappa3 = forward_kappa3->bin(i).numEntries() > 1? forward_kappa3->bin(i).stdDev() : 0.0; //See Eq. 3 for the factor of two: https://web.eecs.umich.edu/~fessler/papers/files/tr/stderr.pdf double yerr_fkappa3 = safediv(sqrt(forward_kappa3->bin(i).sumW2()), 2.*forward_kappa3->bin(i).sumW()); forwardRMS_kappa3->point(i).setY(stdv_fkappa3, yerr_fkappa3); double stdv_fkappa5 = forward_kappa5->bin(i).numEntries() > 1? forward_kappa5->bin(i).stdDev() : 0.0; double yerr_fkappa5 = safediv(sqrt(forward_kappa5->bin(i).sumW2()), 2.*forward_kappa5->bin(i).sumW()); forwardRMS_kappa5->point(i).setY(stdv_fkappa5, yerr_fkappa5); double stdv_fkappa7 = forward_kappa7->bin(i).numEntries() > 1? 
forward_kappa7->bin(i).stdDev() : 0.0; double yerr_fkappa7 = safediv(sqrt(forward_kappa7->bin(i).sumW2()), 2.*forward_kappa7->bin(i).sumW()); forwardRMS_kappa7->point(i).setY(stdv_fkappa7, yerr_fkappa7); double stdv_ckappa3 = central_kappa3->bin(i).numEntries() > 1? central_kappa3->bin(i).stdDev() : 0.0; double yerr_ckappa3 = safediv(sqrt(central_kappa3->bin(i).sumW2()), 2.*central_kappa3->bin(i).sumW()); centralRMS_kappa3->point(i).setY(stdv_ckappa3, yerr_ckappa3); double stdv_ckappa5 = central_kappa5->bin(i).numEntries() > 1? central_kappa5->bin(i).stdDev() : 0.0; double yerr_ckappa5 = safediv(sqrt(central_kappa5->bin(i).sumW2()), 2.*central_kappa5->bin(i).sumW()); centralRMS_kappa5->point(i).setY(stdv_ckappa5, yerr_ckappa5); double stdv_ckappa7 = central_kappa7->bin(i).numEntries() > 1? central_kappa7->bin(i).stdDev() : 0.0; double yerr_ckappa7 = safediv(sqrt(central_kappa7->bin(i).sumW2()), 2.*central_kappa7->bin(i).sumW()); centralRMS_kappa7->point(i).setY(stdv_ckappa7, yerr_ckappa7); } } } private: Profile1DPtr forward_kappa3; Profile1DPtr forward_kappa5; Profile1DPtr forward_kappa7; Profile1DPtr central_kappa3; Profile1DPtr central_kappa5; Profile1DPtr central_kappa7; Scatter2DPtr forwardRMS_kappa3; Scatter2DPtr forwardRMS_kappa5; Scatter2DPtr forwardRMS_kappa7; Scatter2DPtr centralRMS_kappa3; Scatter2DPtr centralRMS_kappa5; Scatter2DPtr centralRMS_kappa7; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2015_I1393758); } diff --git a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc --- a/analyses/pluginATLAS/ATLAS_2015_I1408516.cc +++ b/analyses/pluginATLAS/ATLAS_2015_I1408516.cc @@ -1,242 +1,242 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class ATLAS_2015_I1408516 : public Analysis { public: /// Constructor ATLAS_2015_I1408516(string name="ATLAS_2015_I1408516", size_t mode=0) : Analysis(name), _mode(mode) // using electron channel for combined data { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Configure projections FinalState fs; Cut cuts = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV; ZFinder zfinder(fs, cuts, (_mode ? PID::MUON : PID::ELECTRON), 12*GeV, 150*GeV, 0.1, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::NO); declare(zfinder, _mode ? "ZFinder_mu" : "ZFinder_el"); // Book histograms const size_t offset = _mode ? 
4 : 1; book(_h["phistar_lo_00_08"] , 2, 1, offset); book(_h["phistar_lo_08_16"] , 3, 1, offset); book(_h["phistar_lo_16_24"] , 4, 1, offset); book(_h["phistar_me_00_04"] , 5, 1, offset); book(_h["phistar_me_04_08"] , 6, 1, offset); book(_h["phistar_me_08_12"] , 7, 1, offset); book(_h["phistar_me_12_16"] , 8, 1, offset); book(_h["phistar_me_16_20"] , 9, 1, offset); book(_h["phistar_me_20_24"] ,10, 1, offset); book(_h["phistar_hi_00_08"] ,11, 1, offset); book(_h["phistar_hi_08_16"] ,12, 1, offset); book(_h["phistar_hi_16_24"] ,13, 1, offset); book(_h["phistar_mll_46_66" ] ,14, 1, offset); book(_h["phistar_mll_66_116" ] ,15, 1, offset); book(_h["phistar_mll_116_150"] ,16, 1, offset); book(_h["zpt_00_04"] ,17, 1, offset); book(_h["zpt_04_08"] ,18, 1, offset); book(_h["zpt_08_12"] ,19, 1, offset); book(_h["zpt_12_16"] ,20, 1, offset); book(_h["zpt_16_20"] ,21, 1, offset); book(_h["zpt_20_24"] ,22, 1, offset); book(_h["zpt_mll_12_20" ] ,23, 1, offset); book(_h["zpt_mll_20_30" ] ,24, 1, offset); book(_h["zpt_mll_30_46" ] ,25, 1, offset); book(_h["zpt_mll_46_66" ] ,26, 1, offset); book(_h["zpt_mll_66_116" ] ,27, 1, offset); book(_h["zpt_mll_116_150"] ,28, 1, offset); book(_h["zpt_00_04_xsec"] ,29, 1, offset); book(_h["zpt_04_08_xsec"] ,30, 1, offset); book(_h["zpt_08_12_xsec"] ,31, 1, offset); book(_h["zpt_12_16_xsec"] ,32, 1, offset); book(_h["zpt_16_20_xsec"] ,33, 1, offset); book(_h["zpt_20_24_xsec"] ,34, 1, offset); book(_h["zpt_mll_12_20_xsec" ] ,35, 1, offset); book(_h["zpt_mll_20_30_xsec" ] ,36, 1, offset); book(_h["zpt_mll_30_46_xsec" ] ,37, 1, offset); book(_h["zpt_mll_46_66_xsec" ] ,38, 1, offset); book(_h["zpt_mll_66_116_xsec" ] ,39, 1, offset); book(_h["zpt_mll_116_150_xsec"] ,40, 1, offset); book(_h["mll_xsec"] ,41, 1, 1 + _mode); } /// Perform the per-event analysis void analyze(const Event& event) { // Get leptonic Z boson const ZFinder& zfinder = apply(event, _mode ? "ZFinder_mu" : "ZFinder_el"); if (zfinder.bosons().size() != 1 ) vetoEvent; const Particle& Zboson = zfinder.boson(); // Get/cut on heavily used Z boson properties const double zpt = Zboson.pT(); const double zrap = Zboson.absrap(); const double zmass = Zboson.mass(); if (zrap > 2.4) vetoEvent; // Get/cut on Z boson leptons const ParticleVector& leptons = zfinder.constituents(); - if (leptons.size() != 2 || leptons[0].threeCharge() * leptons[1].threeCharge() > 0) vetoEvent; + if (leptons.size() != 2 || leptons[0].charge3() * leptons[1].charge3() > 0) vetoEvent; const Particle& lminus = leptons[0].charge() < 0 ? leptons[0] : leptons[1]; const Particle& lplus = leptons[0].charge() < 0 ? leptons[1] : leptons[0]; // Compute phi* const double phi_acop = M_PI - deltaPhi(lminus, lplus); const double costhetastar = tanh( 0.5 * (lminus.eta() - lplus.eta()) ); const double sin2thetastar = (costhetastar > 1) ? 
0.0 : (1.0 - sqr(costhetastar)); const double phistar = tan(0.5 * phi_acop) * sqrt(sin2thetastar); // Inclusive mll if (zmass > 46*GeV || zpt > 45*GeV) { // 46 GeV < mll < 150 GeV OR (12 GeV < mll < 46 GeV AND ZpT >45 GeV) _h["mll_xsec"]->fill(zmass); } // 12 GeV < mll < 150 GeV observables if (zmass < 20*GeV) { // 12 GeV < mll < 20 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_12_20_xsec"]->fill(zpt); _h["zpt_mll_12_20" ]->fill(zpt); } } else if (zmass < 30*GeV) { // 20 GeV < mll < 30 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_20_30_xsec"]->fill(zpt); _h["zpt_mll_20_30" ]->fill(zpt); } } else if (zmass < 46*GeV) { // 30 GeV < mll < 46 GeV if (zpt > 45*GeV) { // ZpT cut only for low-mass regions _h["zpt_mll_30_46_xsec"]->fill(zpt); _h["zpt_mll_30_46" ]->fill(zpt); } } else if (zmass < 66*GeV) { // 46 GeV < mll < 66 GeV _h["zpt_mll_46_66_xsec"]->fill(zpt); _h["zpt_mll_46_66" ]->fill(zpt); _h["phistar_mll_46_66"]->fill(phistar); if (zrap < 0.8) _h["phistar_lo_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_lo_08_16"]->fill(phistar); else _h["phistar_lo_16_24"]->fill(phistar); } else if (zmass < 116*GeV) { // 66 GeV < mll < 116 GeV _h["zpt_mll_66_116_xsec"]->fill(zpt); _h["zpt_mll_66_116" ]->fill(zpt); if (zrap < 0.4) { _h["zpt_00_04_xsec"]->fill(zpt); _h["zpt_00_04"]->fill(zpt); } else if (zrap < 0.8) { _h["zpt_04_08_xsec"]->fill(zpt); _h["zpt_04_08"]->fill(zpt); } else if (zrap < 1.2) { _h["zpt_08_12_xsec"]->fill(zpt); _h["zpt_08_12"]->fill(zpt); } else if (zrap < 1.6) { _h["zpt_12_16_xsec"]->fill(zpt); _h["zpt_12_16"]->fill(zpt); } else if (zrap < 2.0) { _h["zpt_16_20_xsec"]->fill(zpt); _h["zpt_16_20"]->fill(zpt); } else { _h["zpt_20_24_xsec"]->fill(zpt); _h["zpt_20_24"]->fill(zpt); } _h["phistar_mll_66_116"]->fill(phistar); if (zrap < 0.4) _h["phistar_me_00_04"]->fill(phistar); else if (zrap < 0.8) _h["phistar_me_04_08"]->fill(phistar); else if (zrap < 1.2) _h["phistar_me_08_12"]->fill(phistar); else if (zrap < 1.6) _h["phistar_me_12_16"]->fill(phistar); else if (zrap < 2.0) _h["phistar_me_16_20"]->fill(phistar); else _h["phistar_me_20_24"]->fill(phistar); } else { // 116 GeV < mll < 150 GeV _h["zpt_mll_116_150_xsec"]->fill(zpt); _h["zpt_mll_116_150" ]->fill(zpt); _h["phistar_mll_116_150"]->fill(phistar); if (zrap < 0.8) _h["phistar_hi_00_08"]->fill(phistar); else if (zrap < 1.6) _h["phistar_hi_08_16"]->fill(phistar); else _h["phistar_hi_16_24"]->fill(phistar); } } /// Normalise histograms etc., after the run void finalize() { // Scale non-xsec plots to cross-section const double sf = crossSection() / picobarn / sumOfWeights(); for (const auto& key_hist : _h) { scale(key_hist.second, sf); if (!contains(key_hist.first, "_xsec")) normalize(key_hist.second); } // M(ll) plot isn't a differential cross section so shouldn't be divided by bin width for (size_t i = 0; i < 6; ++i) { double bw = _h["mll_xsec"]->bin(i).xWidth(); _h["mll_xsec"]->bin(i).scaleW(bw); } } //@} protected: size_t _mode; private: /// @name Histograms //@{ map _h; //@} }; class ATLAS_2015_I1408516_EL : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_EL() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_EL", 0) { } }; class ATLAS_2015_I1408516_MU : public ATLAS_2015_I1408516 { public: ATLAS_2015_I1408516_MU() : ATLAS_2015_I1408516("ATLAS_2015_I1408516_MU", 1) { } }; DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_EL); DECLARE_RIVET_PLUGIN(ATLAS_2015_I1408516_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2016_I1419070.cc 
b/analyses/pluginATLAS/ATLAS_2016_I1419070.cc --- a/analyses/pluginATLAS/ATLAS_2016_I1419070.cc +++ b/analyses/pluginATLAS/ATLAS_2016_I1419070.cc @@ -1,139 +1,139 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2016_I1419070 : public Analysis { public: /// Constructor ATLAS_2016_I1419070() : Analysis("ATLAS_2016_I1419070") { } public: void init() { declare(FastJets(FinalState(), FastJets::ANTIKT, 0.4), "Jets"); book(forward_500MeV ,1, 1, 1); book(forward_2GeV ,2, 1, 1); book(forward_5GeV ,3, 1, 1); book(central_500MeV ,4, 1, 1); book(central_2GeV ,5, 1, 1); book(central_5GeV ,6, 1, 1); book(diff_500MeV, "d07-x01-y01", true); book(diff_2GeV , "d08-x01-y01", true); book(diff_5GeV , "d09-x01-y01", true); book(sum_500MeV, "d10-x01-y01", true); book(sum_2GeV , "d11-x01-y01", true); book(sum_5GeV , "d12-x01-y01", true); } /// Perform the per-event analysis void analyze(const Event& event) { Jets m_goodJets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.1); if (m_goodJets.size() < 2) vetoEvent; if (m_goodJets[0].pT() < 50*GeV) vetoEvent; if (m_goodJets[1].pT() < 50*GeV) vetoEvent; if (fabs(1.0 - m_goodJets[0].pT()/m_goodJets[1].pT()) > 0.5) vetoEvent; bool check = m_goodJets[0].abseta() < m_goodJets[1].abseta(); int pos_f = int(check); int pos_c = int(!check); double pt500MeV_f = CalculateNCharge(m_goodJets[pos_f], 0.5); double pt2GeV_f = CalculateNCharge(m_goodJets[pos_f], 2.0); double pt5GeV_f = CalculateNCharge(m_goodJets[pos_f], 5.0); double pT_f = m_goodJets[pos_f].pT(); double pt500MeV_c = CalculateNCharge(m_goodJets[pos_c], 0.5); double pt2GeV_c = CalculateNCharge(m_goodJets[pos_c], 2.0); double pt5GeV_c = CalculateNCharge(m_goodJets[pos_c], 5.0); double pT_c = m_goodJets[pos_c].pT(); forward_500MeV->fill(pT_f, pt500MeV_f); forward_2GeV->fill( pT_f, pt2GeV_f); forward_5GeV->fill( pT_f, pt5GeV_f); central_500MeV->fill(pT_c, pt500MeV_c); central_2GeV->fill( pT_c, pt2GeV_c); central_5GeV->fill( pT_c, pt5GeV_c); } double CalculateNCharge(Jet& jet, double pTcut=0.5) { unsigned int ncharge = 0; for (const Particle& p : jet.particles()) { if (p.pT() < pTcut) continue; - if (p.threeCharge()) ++ncharge; + if (p.charge3()) ++ncharge; } if (ncharge > 60) ncharge = 60; return double(ncharge); } /// Normalise histograms etc., after the run void finalize() { if (numEvents() > 2) { for (unsigned int i = 0; i < forward_500MeV->numBins(); ++i) { ProfileBin1D bsum = central_500MeV->bin(i) + forward_500MeV->bin(i); ProfileBin1D bsum2 = central_2GeV->bin(i) + forward_2GeV->bin(i); ProfileBin1D bsum5 = central_5GeV->bin(i) + forward_5GeV->bin(i); ProfileBin1D bdiff = central_500MeV->bin(i) - forward_500MeV->bin(i); ProfileBin1D bdiff2 = central_2GeV->bin(i) - forward_2GeV->bin(i); ProfileBin1D bdiff5 = central_5GeV->bin(i) - forward_5GeV->bin(i); double ydiff = central_500MeV->bin(i).numEntries()? central_500MeV->bin(i).mean() : 0.0; double ydiff2 = central_2GeV->bin(i).numEntries()? central_2GeV->bin(i).mean() : 0.0; double ydiff5 = central_5GeV->bin(i).numEntries()? central_5GeV->bin(i).mean() : 0.0; ydiff -= forward_500MeV->bin(i).numEntries()? forward_500MeV->bin(i).mean() : 0.0; ydiff2 -= forward_2GeV->bin(i).numEntries()? forward_2GeV->bin(i).mean() : 0.0; ydiff5 -= forward_5GeV->bin(i).numEntries()? forward_5GeV->bin(i).mean() : 0.0; double yerr = bsum.numEntries() > 1.0 ? bsum.stdErr() : 0.0; double yerr2 = bsum2.numEntries() > 1.0 ? 
bsum2.stdErr() : 0.0; double yerr5 = bsum5.numEntries() > 1.0 ? bsum5.stdErr() : 0.0; diff_500MeV->point(i).setY(ydiff, yerr); diff_2GeV->point(i).setY(ydiff2, yerr2); diff_5GeV->point(i).setY(ydiff5, yerr5); sum_500MeV->point(i).setY(bsum.numEntries()? bsum.mean() : 0.0, yerr); sum_2GeV->point(i).setY(bsum2.numEntries()? bsum2.mean() : 0.0, yerr2); sum_5GeV->point(i).setY(bsum5.numEntries()? bsum5.mean() : 0.0, yerr5); } } } private: Profile1DPtr forward_500MeV; Profile1DPtr forward_2GeV; Profile1DPtr forward_5GeV; Profile1DPtr central_500MeV; Profile1DPtr central_2GeV; Profile1DPtr central_5GeV; Scatter2DPtr sum_500MeV; Scatter2DPtr sum_2GeV; Scatter2DPtr sum_5GeV; Scatter2DPtr diff_500MeV; Scatter2DPtr diff_2GeV; Scatter2DPtr diff_5GeV; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2016_I1419070); } diff --git a/analyses/pluginATLAS/ATLAS_2017_I1609448.cc b/analyses/pluginATLAS/ATLAS_2017_I1609448.cc --- a/analyses/pluginATLAS/ATLAS_2017_I1609448.cc +++ b/analyses/pluginATLAS/ATLAS_2017_I1609448.cc @@ -1,285 +1,285 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" namespace Rivet { /// ATLAS pTmiss+jets cross-section ratios at 13 TeV class ATLAS_2017_I1609448 : public Analysis { public: /// Constructor ATLAS_2017_I1609448(string name="ATLAS_2017_I1609448") : Analysis(name) { _mode = 0; // using Z -> nunu channel by default } struct HistoHandler { Histo1DPtr histo; Scatter2DPtr scatter; unsigned int d, x, y; void fill(double value) { histo->fill(value); } }; /// Initialize void init() { // Prompt photons PromptFinalState photon_fs(Cuts::abspid == PID::PHOTON && Cuts::abseta < 4.9); // Prompt electrons PromptFinalState el_fs(Cuts::abseta < 4.9 && Cuts::abspid == PID::ELECTRON); // Prompt muons PromptFinalState mu_fs(Cuts::abseta < 4.9 && Cuts::abspid == PID::MUON); // Dressed leptons Cut lep_cuts = Cuts::pT > 7*GeV && Cuts::abseta < 2.5; DressedLeptons dressed_leps(photon_fs, (_mode == 2 ? 
el_fs : mu_fs), 0.1, lep_cuts); declare(dressed_leps, "DressedLeptons"); // In-acceptance leptons for lepton veto PromptFinalState veto_lep_fs(Cuts::abseta < 4.9 && (Cuts::abspid == PID::ELECTRON || Cuts::abspid == PID::MUON)); veto_lep_fs.acceptTauDecays(); veto_lep_fs.acceptMuonDecays(); DressedLeptons veto_lep(photon_fs, veto_lep_fs, 0.1, lep_cuts); declare(veto_lep, "VetoLeptons"); // MET VetoedFinalState met_fs(!(Cuts::abseta > 2.5 && Cuts::abspid == PID::MUON)); if (_mode) met_fs.addVetoOnThisFinalState(dressed_leps); declare(MissingMomentum(met_fs), "MET"); // Jet collection FastJets jets(FinalState(Cuts::abseta < 4.9), FastJets::ANTIKT, 0.4, JetAlg::Muons::NONE, JetAlg::Invisibles::NONE); declare(jets, "Jets"); _h["met_mono"] = bookHandler(1, 1, 2); _h["met_vbf" ] = bookHandler(2, 1, 2); _h["mjj_vbf" ] = bookHandler(3, 1, 2); _h["dphijj_vbf"] = bookHandler(4, 1, 2); } HistoHandler bookHandler(unsigned int id_d, unsigned int id_x, unsigned int id_y) { HistoHandler dummy; if (_mode < 2) { // numerator mode const string histName = "_" + makeAxisCode(id_d, id_x, id_y); book(dummy.histo, histName, refData(id_d, id_x, id_y)); // hidden auxiliary output book(dummy.scatter, id_d, id_x, id_y - 1, true); // ratio dummy.d = id_d; dummy.x = id_x; dummy.y = id_y; } else { book(dummy.histo, id_d, id_x, 4); // denominator mode } return dummy; } bool isBetweenJets(const Jet& probe, const Jet& boundary1, const Jet& boundary2) { const double y_p = probe.rapidity(); const double y_b1 = boundary1.rapidity(); const double y_b2 = boundary2.rapidity(); const double y_min = std::min(y_b1, y_b2); const double y_max = std::max(y_b1, y_b2); return (y_p > y_min && y_p < y_max); } int centralJetVeto(Jets& jets) { if (jets.size() < 2) return 0; const Jet bj1 = jets.at(0); const Jet bj2 = jets.at(1); // Start loop at the 3rd hardest pT jet int n_between = 0; for (size_t i = 2; i < jets.size(); ++i) { const Jet j = jets.at(i); if (isBetweenJets(j, bj1, bj2) && j.pT() > 25*GeV) ++n_between; } return n_between; } /// Perform the per-event analysis void analyze(const Event& event) { // Require 0 (Znunu) or 2 (Zll) dressed leptons bool isZll = bool(_mode); const vector &vetoLeptons = applyProjection(event, "VetoLeptons").dressedLeptons(); const vector &all_leps = applyProjection(event, "DressedLeptons").dressedLeptons(); if (!isZll && vetoLeptons.size()) vetoEvent; if ( isZll && all_leps.size() != 2) vetoEvent; vector leptons; bool pass_Zll = true; if (isZll) { // Sort dressed leptons by pT if (all_leps[0].pt() > all_leps[1].pt()) { leptons.push_back(all_leps[0]); leptons.push_back(all_leps[1]); } else { leptons.push_back(all_leps[1]); leptons.push_back(all_leps[0]); } // Leading lepton pT cut pass_Zll &= leptons[0].pT() > 80*GeV; // Opposite-charge requirement - pass_Zll &= threeCharge(leptons[0]) + threeCharge(leptons[1]) == 0; + pass_Zll &= charge3(leptons[0]) + charge3(leptons[1]) == 0; // Z-mass requirement const double Zmass = (leptons[0].mom() + leptons[1].mom()).mass(); pass_Zll &= (Zmass >= 66*GeV && Zmass <= 116*GeV); } if (!pass_Zll) vetoEvent; // Get jets and remove those within dR = 0.5 of a dressed lepton Jets jets = applyProjection(event, "Jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::absrap < 4.4); for (const DressedLepton& lep : leptons) ifilter_discard(jets, deltaRLess(lep, 0.5)); const size_t njets = jets.size(); if (!njets) vetoEvent; const int njets_gap = centralJetVeto(jets); double jpt1 = jets[0].pT(); double jeta1 = jets[0].eta(); double mjj = 0., jpt2 = 0., dphijj = 0.; if (njets >= 2) { mjj = 
(jets[0].momentum() + jets[1].momentum()).mass(); jpt2 = jets[1].pT(); dphijj = deltaPhi(jets[0], jets[1]); } // MET Vector3 met_vec = apply(event, "MET").vectorMPT(); double met = met_vec.mod(); // Cut on deltaPhi between MET and first 4 jets, but only if jet pT > 30 GeV bool dphi_fail = false; for (size_t i = 0; i < jets.size() && i < 4; ++i) { dphi_fail |= (deltaPhi(jets[i], met_vec) < 0.4 && jets[i].pT() > 30*GeV); } const bool pass_met_dphi = met > 200*GeV && !dphi_fail; const bool pass_vbf = pass_met_dphi && mjj > 200*GeV && jpt1 > 80*GeV && jpt2 > 50*GeV && njets >= 2 && !njets_gap; const bool pass_mono = pass_met_dphi && jpt1 > 120*GeV && fabs(jeta1) < 2.4; if (pass_mono) _h["met_mono"].fill(met); if (pass_vbf) { _h["met_vbf"].fill(met/GeV); _h["mjj_vbf"].fill(mjj/GeV); _h["dphijj_vbf"].fill(dphijj); } } /// Normalise, scale and otherwise manipulate histograms here void finalize() { const double sf(crossSection() / femtobarn / sumOfWeights()); for (map::iterator hit = _h.begin(); hit != _h.end(); ++hit) { scale(hit->second.histo, sf); if (_mode < 2) constructRmiss(hit->second); } } void constructRmiss(HistoHandler& handler) { // Load transfer function from reference data file const YODA::Scatter2D& rmiss = refData(handler.d, handler.x, handler.y); const YODA::Scatter2D& numer = refData(handler.d, handler.x, handler.y + 1); const YODA::Scatter2D& denom = refData(handler.d, handler.x, handler.y + 2); for (size_t i = 0; i < handler.scatter->numPoints(); ++i) { const Point2D& r = rmiss.point(i); // SM Rmiss const Point2D& n = numer.point(i); // SM numerator const Point2D& d = denom.point(i); // SM denominator const HistoBin1D& b = handler.histo->bin(i); // BSM double bsmy; try { bsmy = b.height(); } catch (const Exception&) { // LowStatsError or WeightError bsmy = 0; } double bsmey; try { bsmey = b.heightErr(); } catch (const Exception&) { // LowStatsError or WeightError bsmey = 0; } // Combined numerator double sm_plus_bsm = n.y() + bsmy; // Rmiss central value double rmiss_y = safediv(sm_plus_bsm, d.y()); // Ratio error (Rmiss = SM_num/SM_denom + BSM/SM_denom ~ Rmiss_SM + BSM/SM_denom double rmiss_p = sqrt(r.yErrPlus()*r.yErrPlus() + safediv(bsmey*bsmey, d.y()*d.y())); double rmiss_m = sqrt(r.yErrMinus()*r.yErrMinus() + safediv(bsmey*bsmey, d.y()*d.y())); // Set new values Point2D& p = handler.scatter->point(i); // (SM + BSM) Rmiss p.setY(rmiss_y); p.setYErrMinus(rmiss_m); p.setYErrPlus(rmiss_p); } } protected: // Analysis-mode switch size_t _mode; /// Histograms map _h; }; /// ATLAS pTmiss+jets specialisation for Znunu channel class ATLAS_2017_I1609448_Znunu : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Znunu() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Znunu") { _mode = 0; } }; /// ATLAS pTmiss+jets specialisation for Zmumu channel class ATLAS_2017_I1609448_Zmumu : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Zmumu() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Zmumu") { _mode = 1; } }; /// ATLAS pTmiss+jets specialisation for Zee channel class ATLAS_2017_I1609448_Zee : public ATLAS_2017_I1609448 { public: ATLAS_2017_I1609448_Zee() : ATLAS_2017_I1609448("ATLAS_2017_I1609448_Zee") { _mode = 2; } }; // Hooks for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Znunu); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Zmumu); DECLARE_RIVET_PLUGIN(ATLAS_2017_I1609448_Zee); } diff --git a/analyses/pluginCMS/CMS_2012_I1111014.cc b/analyses/pluginCMS/CMS_2012_I1111014.cc --- a/analyses/pluginCMS/CMS_2012_I1111014.cc +++ 
b/analyses/pluginCMS/CMS_2012_I1111014.cc @@ -1,185 +1,185 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/JetShape.hh" namespace Rivet { /// @brief CMS jet shape analysis /// @author Andreas Hinzmann class CMS_2012_I1111014 : public Analysis { public: /// Constructor CMS_2012_I1111014() : Analysis("CMS_2012_I1111014") { } /// @name Analysis methods //@{ void init() { // Set up projections const FinalState fs(-5.0, 5.0); declare(fs, "FS"); FastJets fj5(fs, FastJets::ANTIKT, 0.5); declare(fj5, "Jets5"); FastJets fj7(fs, FastJets::ANTIKT, 0.7); declare(fj7, "Jets7"); // Specify pT bins _ptedges = {{ 20.,25.,30.,40.,50.,60.,70.,80.,90.,100.,110.,125.,140.,160.,180.,200.,225.,250.,300.,400.,500.,600.,1000. }}; _yedges = {{ 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0 }}; // Register a jet shape projection and histogram for each pT bin unsigned int histo_counter=1; for (size_t j = 0; j < 6; ++j) { for (size_t i = 0; i < 22; ++i) { if (i > 20 && j == 3) continue; if (i > 18 && j >= 4) continue; // Set up projections for each (pT,y) bin _jsnames_pT[i][j] = "JetShape" + to_str(i) + "_" + to_str(j); const JetShape jsp(fj7, 0.0, 0.7, 7, _ptedges[i], _ptedges[i+1], _yedges[j], _yedges[j+1], RAPIDITY); declare(jsp, _jsnames_pT[i][j]); // Book profile histograms for each (pT,y) bin book(_profhistRho_pT[i][j], histo_counter, 1, 1); histo_counter+=1; } } book(_profhistNch[0], 126, 1, 1); book(_profhistNch[1], 126, 1, 2); book(_profhistDr[0], 127, 1, 1); book(_profhistDr[1], 127, 1, 2); book(_profhistDeta, "TMP/Deta", refData(127,1,1)); book(_profhistDphi, "TMP/Dphi", refData(127,1,1)); book(_profhistAsym, "d128-x01-y01", true); } /// Do the analysis void analyze(const Event& evt) { // Get jets and require at least one to pass pT and y cuts Jets jets7 = apply(evt, "Jets7") .jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 3.0); if(jets7.size()>2) jets7.resize(2); // Use only the first two jets MSG_DEBUG("Jet (R=0.7) multiplicity before cuts = " << jets7.size()); if (jets7.size() == 0) { MSG_DEBUG("No jets (R=0.7) found in required pT and rapidity range"); vetoEvent; } // Calculate and histogram jet shapes for (size_t jy = 0; jy < 6; ++jy) { for (size_t ipt = 0; ipt < 22; ++ipt) { if (ipt > 20 && jy == 3) continue; if (ipt > 18 && jy >= 4) continue; JetShape jsipt = apply(evt, _jsnames_pT[ipt][jy]); jsipt.calc(jets7); for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) { for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) { const double r_rho = jsipt.rBinMid(rbin); _profhistRho_pT[ipt][jy]->fill(r_rho, (1./0.1)*jsipt.diffJetShape(ijet, rbin)); } } } } // Get jets and require at least one to pass pT and y cuts Jets jets5 = apply(evt, "Jets5") .jetsByPt(Cuts::ptIn(50*GeV, 1000*GeV) && Cuts::absrap < 2.0); // Calculate and histogram charged jet shapes for (const Jet& jet : jets5) { double ncharge = 0; double eta=0; double phi=0; double sumpt=0; for (const Particle& p : jet.particles()) { - if ((p.pT() < 0.5) || (p.threeCharge()==0) || (abs(p.pdgId())==11) || (abs(p.pdgId())==13)) continue; + if ((p.pT() < 0.5) || (p.charge3()==0) || (abs(p.pdgId())==11) || (abs(p.pdgId())==13)) continue; ncharge++; sumpt+=p.pT(); eta+=p.pT()*p.eta(); phi+=p.pT()*mapAngleMPiToPi(p.phi()-jet.phi()); } if (jet.absrap()<1.0) { _profhistNch[0]->fill(jet.pT(), ncharge ); } else if (jet.absrap()<2.0) { _profhistNch[1]->fill(jet.pT(), ncharge ); 
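// The remainder of this jet loop builds pT-weighted charged jet widths from the first moments <eta> and <phi> accumulated above:
//   deta = sum_i pT_i*(eta_i - <eta>)^2 / sum_i pT_i, and dphi analogously in azimuth;
// their sum deta + dphi is filled as the charged jet width, and finalize() sets each asymmetry point
// to the per-bin ratio of mean deta to mean dphi.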
} if (sumpt==0) continue; eta/=sumpt; phi/=sumpt; double deta=0; double dphi=0; for (const Particle& p : jet.particles()) { - if ((p.pT() < 0.5) || (p.threeCharge()==0) || (abs(p.pdgId())==11) || (abs(p.pdgId())==13)) continue; + if ((p.pT() < 0.5) || (p.charge3()==0) || (abs(p.pdgId())==11) || (abs(p.pdgId())==13)) continue; deta+=p.pT()*pow(p.eta()-eta,2); dphi+=p.pT()*pow(mapAngleMPiToPi(p.phi()-phi-jet.phi()),2); } deta/=sumpt; dphi/=sumpt; if ((dphi==0)||(deta==0)) continue; if (jet.absrap()<1.0) { _profhistDr[0]->fill(jet.pT(), deta+dphi ); _profhistDeta->fill(jet.pT(), deta ); _profhistDphi->fill(jet.pT(), dphi ); } else if (jet.absrap()<2.0) { _profhistDr[1]->fill(jet.pT(), deta+dphi ); } } } // Finalize void finalize() { for (unsigned int i = 0; i < _profhistAsym->numPoints(); ++i) { if((_profhistDeta->bin(i).numEntries()<2)||(_profhistDphi->bin(i).numEntries()<2)) continue; if((_profhistDeta->bin(i).mean()==0)||(_profhistDphi->bin(i).mean()==0)) continue; double mean_ratio=_profhistDeta->bin(i).mean() / _profhistDphi->bin(i).mean(); double mean_error=mean_ratio*sqrt(pow(_profhistDeta->bin(i).stdErr()/_profhistDeta->bin(i).mean(),2)+pow(_profhistDphi->bin(i).stdErr()/_profhistDphi->bin(i).mean(),2)); _profhistAsym->point(i).setY(mean_ratio,mean_error); } } //@} private: /// @name Analysis data //@{ /// Jet \f$ p_\perp\f$ bins. vector _ptedges; // This can't be a raw array if we want to initialise it non-painfully vector _yedges; /// JetShape projection name for each \f$p_\perp\f$ bin. string _jsnames_pT[22][6]; //@} /// @name Histograms //@{ Profile1DPtr _profhistRho_pT[22][6]; Profile1DPtr _profhistNch[2]; Profile1DPtr _profhistDr[2]; Profile1DPtr _profhistDeta; Profile1DPtr _profhistDphi; Scatter2DPtr _profhistAsym; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2012_I1111014); } diff --git a/analyses/pluginD0/D0_2010_S8821313.cc b/analyses/pluginD0/D0_2010_S8821313.cc --- a/analyses/pluginD0/D0_2010_S8821313.cc +++ b/analyses/pluginD0/D0_2010_S8821313.cc @@ -1,105 +1,105 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class D0_2010_S8821313 : public Analysis { public: /// Constructor D0_2010_S8821313() : Analysis("D0_2010_S8821313") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// Initialise and register projections FinalState fs; Cut cuts = (Cuts::abseta < 1.1 || Cuts::absetaIn( 1.5, 3.0)) && Cuts::pT > 20*GeV; ZFinder zfinder_ee(fs, cuts, PID::ELECTRON, 70*GeV, 110*GeV, 0.2, ZFinder::ClusterPhotons::NODECAY, ZFinder::AddPhotons::YES); declare(zfinder_ee, "zfinder_ee"); ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 70*GeV, 110*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO); declare(zfinder_mm, "zfinder_mm"); /// Book histograms here {Histo1DPtr tmp; _h_phistar_ee.add(0.0, 1.0, book(tmp, 1, 1, 1));} {Histo1DPtr tmp; _h_phistar_ee.add(1.0, 2.0, book(tmp, 1, 1, 2));} {Histo1DPtr tmp; _h_phistar_ee.add(2.0, 10.0,book(tmp, 1, 1, 3));} {Histo1DPtr tmp; _h_phistar_mm.add(0.0, 1.0, book(tmp, 2, 1, 1));} {Histo1DPtr tmp; _h_phistar_mm.add(1.0, 2.0, book(tmp, 2, 1, 2));} } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const ZFinder& zfinder_ee = apply(event, "zfinder_ee"); if (zfinder_ee.bosons().size() == 1) { Particles ee = zfinder_ee.constituents(); 
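// phi* construction, used identically for the ee and mumu channels below: with the two leptons ordered by charge,
// phi_acop = pi - dphi(l^-, l^+) and cos(theta*_eta) = tanh( (eta^- - eta^+)/2 ), giving
//   phi* = tan(phi_acop/2) * sin(theta*_eta),
// histogrammed in slices of the dilepton rapidity.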
std::sort(ee.begin(), ee.end(), cmpMomByPt); - const FourMomentum& eminus = PID::threeCharge(ee[0].pid()) < 0 ? ee[0].momentum() : ee[1].momentum(); - const FourMomentum& eplus = PID::threeCharge(ee[0].pid()) < 0 ? ee[1].momentum() : ee[0].momentum(); + const FourMomentum& eminus = PID::charge3(ee[0].pid()) < 0 ? ee[0].momentum() : ee[1].momentum(); + const FourMomentum& eplus = PID::charge3(ee[0].pid()) < 0 ? ee[1].momentum() : ee[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(eminus.phi() - eplus.phi()); double costhetastar = tanh((eminus.eta() - eplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_ee.bosons()[0].momentum(); _h_phistar_ee.fill(zmom.rapidity(), phistar, weight); } const ZFinder& zfinder_mm = apply(event, "zfinder_mm"); if (zfinder_mm.bosons().size() == 1) { Particles mm = zfinder_mm.constituents(); std::sort(mm.begin(), mm.end(), cmpMomByPt); - const FourMomentum& mminus = PID::threeCharge(mm[0].pid()) < 0 ? mm[0].momentum() : mm[1].momentum(); - const FourMomentum& mplus = PID::threeCharge(mm[0].pid()) < 0 ? mm[1].momentum() : mm[0].momentum(); + const FourMomentum& mminus = PID::charge3(mm[0].pid()) < 0 ? mm[0].momentum() : mm[1].momentum(); + const FourMomentum& mplus = PID::charge3(mm[0].pid()) < 0 ? mm[1].momentum() : mm[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(mminus.phi() - mplus.phi()); double costhetastar = tanh((mminus.eta() - mplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_mm.bosons()[0].momentum(); _h_phistar_mm.fill(zmom.rapidity(), phistar, weight); } } /// Normalise histograms etc., after the run void finalize() { for (Histo1DPtr hist : _h_phistar_ee.histos()) normalize(hist); for (Histo1DPtr hist : _h_phistar_mm.histos()) normalize(hist); } //@} private: /// @name Histograms //@{ BinnedHistogram _h_phistar_ee; BinnedHistogram _h_phistar_mm; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2010_S8821313); } diff --git a/analyses/pluginD0/D0_2015_I1324946.cc b/analyses/pluginD0/D0_2015_I1324946.cc --- a/analyses/pluginD0/D0_2015_I1324946.cc +++ b/analyses/pluginD0/D0_2015_I1324946.cc @@ -1,101 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ZFinder.hh" namespace Rivet { class D0_2015_I1324946 : public Analysis { public: /// Constructor D0_2015_I1324946() : Analysis("D0_2015_I1324946") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; ZFinder zfinder_mm(fs, Cuts::abseta < 2 && Cuts::pT > 15*GeV, PID::MUON, 30*GeV, 500*GeV, 0.0, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::NO); declare(zfinder_mm, "zfinder_mm"); book(_h_phistar_mm_peak_central ,1, 1, 1); book(_h_phistar_mm_peak_forward ,1, 1, 2); book(_h_phistar_mm_low_central ,2, 1, 1); book(_h_phistar_mm_low_forward ,2, 1, 2); book(_h_phistar_mm_high1 ,3, 1, 1); book(_h_phistar_mm_high2 ,4, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; //70(event, "zfinder_mm"); if (zfinder_mm.bosons().size() == 1) { Particles mm = zfinder_mm.constituents(); std::sort(mm.begin(), mm.end(), cmpMomByPt); - const FourMomentum& mminus = PID::threeCharge(mm[0].pid()) < 0 ? 
mm[0].momentum() : mm[1].momentum(); - const FourMomentum& mplus = PID::threeCharge(mm[0].pid()) < 0 ? mm[1].momentum() : mm[0].momentum(); + const FourMomentum& mminus = PID::charge3(mm[0].pid()) < 0 ? mm[0].momentum() : mm[1].momentum(); + const FourMomentum& mplus = PID::charge3(mm[0].pid()) < 0 ? mm[1].momentum() : mm[0].momentum(); double phi_acop = M_PI - mapAngle0ToPi(mminus.phi() - mplus.phi()); double costhetastar = tanh((mminus.eta() - mplus.eta())/2); double sin2thetastar = 1 - sqr(costhetastar); if (sin2thetastar < 0) sin2thetastar = 0; const double phistar = tan(phi_acop/2) * sqrt(sin2thetastar); const FourMomentum& zmom = zfinder_mm.bosons()[0].momentum(); if (zmom.mass()<30*GeV || zmom.mass() >500*GeV) vetoEvent; if( zmom.mass()>70 && zmom.mass()<100 && zmom.absrap()<1.0) _h_phistar_mm_peak_central->fill(phistar, weight); if( zmom.mass()>70 && zmom.mass()<100 && zmom.absrap()>1.0 && zmom.absrap()<2.0) _h_phistar_mm_peak_forward->fill(phistar, weight); if( zmom.mass()>30 && zmom.mass()<60 && zmom.absrap()<1.0) _h_phistar_mm_low_central->fill(phistar, weight); if( zmom.mass()>30 && zmom.mass()<60 && zmom.absrap()>1.0 && zmom.absrap()<2.0) _h_phistar_mm_low_forward->fill(phistar, weight); if( zmom.mass()>160 && zmom.mass()<300) _h_phistar_mm_high1->fill(phistar, weight); if( zmom.mass()>300 && zmom.mass()<500) _h_phistar_mm_high2->fill(phistar, weight); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_phistar_mm_low_central); normalize(_h_phistar_mm_low_forward); normalize(_h_phistar_mm_peak_central); normalize(_h_phistar_mm_peak_forward); normalize(_h_phistar_mm_high1); normalize(_h_phistar_mm_high2); } //} //@} private: /// @name Histograms //@{ Histo1DPtr _h_phistar_mm_low_central; Histo1DPtr _h_phistar_mm_low_forward; Histo1DPtr _h_phistar_mm_peak_central; Histo1DPtr _h_phistar_mm_peak_forward; Histo1DPtr _h_phistar_mm_high1; Histo1DPtr _h_phistar_mm_high2; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(D0_2015_I1324946); } diff --git a/analyses/pluginHERA/H1_1994_S2919893.cc b/analyses/pluginHERA/H1_1994_S2919893.cc --- a/analyses/pluginHERA/H1_1994_S2919893.cc +++ b/analyses/pluginHERA/H1_1994_S2919893.cc @@ -1,229 +1,229 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Math/Constants.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DISKinematics.hh" namespace Rivet { /// @brief H1 energy flow and charged particle spectra /// @author Peter Richardson /// Based on the equivalent HZTool analysis class H1_1994_S2919893 : public Analysis { public: /// Constructor H1_1994_S2919893() : Analysis("H1_1994_S2919893") {} /// @name Analysis methods //@{ /// Initialise projections and histograms void init() { // Projections declare(DISLepton(), "Lepton"); declare(DISKinematics(), "Kinematics"); declare(FinalState(), "FS"); // Histos book(_histEnergyFlowLowX ,1, 1, 1); book(_histEnergyFlowHighX ,1, 1, 2); book(_histEECLowX ,2, 1, 1); book(_histEECHighX ,2, 1, 2); book(_histSpectraW77 ,3, 1, 1); book(_histSpectraW122 ,3, 1, 2); book(_histSpectraW169 ,3, 1, 3); book(_histSpectraW117 ,3, 1, 4); book(_histPT2 ,4, 1, 1); book(_w77 .first, "TMP/w77_1"); book(_w122.first, "TMP/w122_1"); book(_w169.first, "TMP/w169_1"); book(_w117.first, "TMP/w117_1"); book(_wEnergy.first, "TMP/wEnergy_1"); book(_w77 .second, "TMP/w77_2"); book(_w122.second, "TMP/w122_2"); book(_w169.second, "TMP/w169_2"); book(_w117.second, "TMP/w117_2"); book(_wEnergy.second, "TMP/wEnergy_2"); } /// Analyse each event void analyze(const 
Event& event) { // Get the DIS kinematics const DISKinematics& dk = apply(event, "Kinematics"); const double x = dk.x(); const double w2 = dk.W2(); const double w = sqrt(w2); // Momentum of the scattered lepton const DISLepton& dl = apply(event,"Lepton"); const FourMomentum leptonMom = dl.out(); const double ptel = leptonMom.pT(); const double enel = leptonMom.E(); const double thel = leptonMom.angle(dk.beamHadron().mom())/degree; // Extract the particles other than the lepton const FinalState& fs = apply(event, "FS"); Particles particles; particles.reserve(fs.particles().size()); const GenParticle* dislepGP = dl.out().genParticle(); for (const Particle& p : fs.particles()) { const GenParticle* loopGP = p.genParticle(); if (loopGP == dislepGP) continue; particles.push_back(p); } // Cut on the forward energy double efwd = 0.0; for (const Particle& p : particles) { const double th = p.angle(dk.beamHadron())/degree; if (inRange(th, 4.4, 15)) efwd += p.E(); } // Apply the cuts // Lepton energy and angle, w2 and forward energy MSG_DEBUG("enel/GeV = " << enel/GeV << ", thel = " << thel << ", w2 = " << w2 << ", efwd/GeV = " << efwd/GeV); bool cut = enel/GeV > 14. && thel > 157. && thel < 172.5 && w2 >= 3000. && efwd/GeV > 0.5; if (!cut) vetoEvent; // Weight of the event (x < 1e-3 ? _wEnergy.first : _wEnergy.second)->fill(); // Boost to hadronic CM const LorentzTransform hcmboost = dk.boostHCM(); // Loop over the particles long ncharged(0); for (size_t ip1 = 0; ip1 < particles.size(); ++ip1) { const Particle& p = particles[ip1]; const double th = p.angle(dk.beamHadron().momentum()) / degree; // Boost momentum to lab const FourMomentum hcmMom = hcmboost.transform(p.momentum()); // Angular cut if (th <= 4.4) continue; // Energy flow histogram const double et = fabs(hcmMom.Et()); const double eta = hcmMom.eta(); (x < 1e-3 ? _histEnergyFlowLowX : _histEnergyFlowHighX)->fill(eta, et); - if (PID::threeCharge(p.pid()) != 0) { + if (PID::charge3(p.pid()) != 0) { /// @todo Use units in w comparisons... what are the units? if (w > 50. && w <= 200.) { double xf= 2 * hcmMom.z() / w; double pt2 = hcmMom.pT2(); if (w > 50. && w <= 100.) { _histSpectraW77 ->fill(xf); } else if (w > 100. && w <= 150.) { _histSpectraW122->fill(xf); } else if (w > 150. && w <= 200.) { _histSpectraW169->fill(xf); } _histSpectraW117->fill(xf); /// @todo Is this profile meant to be filled with 2 weight factors? _histPT2->fill(xf, pt2/GeV2); ++ncharged; } } // Energy-energy correlation if (th <= 8.) continue; double phi1 = p.phi(ZERO_2PI); double eta1 = p.eta(); double et1 = fabs(p.momentum().Et()); for (size_t ip2 = ip1+1; ip2 < particles.size(); ++ip2) { const Particle& p2 = particles[ip2]; //double th2 = beamAngle(p2.momentum(), order); double th2 = p2.angle(dk.beamHadron().momentum()) / degree; if (th2 <= 8.) continue; double phi2 = p2.phi(ZERO_2PI); /// @todo Use angle function double deltaphi = phi1 - phi2; if (fabs(deltaphi) > PI) deltaphi = fabs(fabs(deltaphi) - TWOPI); double eta2 = p2.eta(); double omega = sqrt(sqr(eta1-eta2) + sqr(deltaphi)); double et2 = fabs(p2.momentum().Et()); double wt = et1*et2 / sqr(ptel); (x < 1e-3 ? _histEECLowX : _histEECHighX)->fill(omega, wt); } } // Factors for normalization if (w > 50. && w <= 200.) { if (w <= 100.) { _w77.first ->fill(ncharged); _w77.second->fill(); } else if (w <= 150.) 
{ _w122.first ->fill(ncharged); _w122.second->fill(); } else { _w169.first ->fill(ncharged); _w169.second->fill(); } _w117.first ->fill(ncharged); _w117.second->fill(); } } // Normalize inclusive single particle distributions to the average number of charged particles per event. void finalize() { normalize(_histSpectraW77, *_w77.first/ *_w77.second); normalize(_histSpectraW122, *_w122.first/ *_w122.second); normalize(_histSpectraW169, *_w169.first/ *_w169.second); normalize(_histSpectraW117, *_w117.first/ *_w117.second); scale(_histEnergyFlowLowX , 1./ *_wEnergy.first ); scale(_histEnergyFlowHighX, 1./ *_wEnergy.second); scale(_histEECLowX , 1./ *_wEnergy.first ); scale(_histEECHighX, 1./ *_wEnergy.second); } //@} private: /// Polar angle with right direction of the beam inline double beamAngle(const FourVector& v, bool order) { double thel = v.polarAngle()/degree; if (thel < 0) thel += 180.; if (!order) thel = 180 - thel; return thel; } /// @name Histograms //@{ Histo1DPtr _histEnergyFlowLowX, _histEnergyFlowHighX; Histo1DPtr _histEECLowX, _histEECHighX; Histo1DPtr _histSpectraW77, _histSpectraW122, _histSpectraW169, _histSpectraW117; Profile1DPtr _histPT2; //@} /// @name Storage of weights to calculate averages for normalisation //@{ pair _w77, _w122, _w169, _w117, _wEnergy; //@} }; DECLARE_RIVET_PLUGIN(H1_1994_S2919893); } diff --git a/analyses/pluginMC/MC_SUSY.cc b/analyses/pluginMC/MC_SUSY.cc --- a/analyses/pluginMC/MC_SUSY.cc +++ b/analyses/pluginMC/MC_SUSY.cc @@ -1,318 +1,318 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/LeadingParticlesFinalState.hh" namespace Rivet { /// @brief MC validation analysis for SUSY events /// @author Andy Buckley class MC_SUSY : public Analysis { public: /// Constructor MC_SUSY() : Analysis("MC_SUSY") { } /// @name Analysis methods //@{ // Book histograms void init() { // Basic final state const FinalState fs(-4.0, 4.0, 10*GeV); // Tracks and jets declare(ChargedFinalState(fs), "Tracks"); declare(FastJets(fs, FastJets::ANTIKT, 0.7), "Jets"); IdentifiedFinalState photonfs(fs); photonfs.acceptId(PID::PHOTON); declare(photonfs, "AllPhotons"); IdentifiedFinalState efs(fs); efs.acceptIdPair(PID::ELECTRON); declare(efs, "Electrons"); IdentifiedFinalState mufs(fs); mufs.acceptIdPair(PID::MUON); declare(mufs, "Muons"); MissingMomentum missing(fs); declare(missing, "MET"); LeadingParticlesFinalState lpfs(fs); lpfs.addParticleIdPair(PID::ELECTRON); lpfs.addParticleIdPair(PID::MUON); declare(lpfs, "LeadingParticles"); book(_hist_n_trk ,"n-trk", 50, 0.5, 300.5); book(_hist_phi_trk ,"phi-trk", 50, -PI, PI); book(_hist_eta_trk ,"eta-trk", 50, -4, 4); book(_hist_pt_trk ,"pt-trk", 100, 0.0, 1500); book(_hist_n_jet ,"n-jet", 21, -0.5, 20.5); book(_hist_phi_jet ,"phi-jet", 50, -PI, PI); book(_hist_eta_jet ,"eta-jet", 50, -4, 4); book(_hist_pt_jet ,"pt-jet", 100, 0.0, 1500); book(_hist_n_e ,"n-e", 11, -0.5, 10.5); book(_hist_phi_e ,"phi-e", 50, -PI, PI); book(_hist_eta_e ,"eta-e", 50, -4, 4); book(_hist_pt_e ,"pt-e", 100, 0.0, 500); book(_hist_n_mu ,"n-mu", 11, -0.5, 10.5); book(_hist_phi_mu ,"phi-mu", 50, -PI, PI); book(_hist_eta_mu ,"eta-mu", 50, -4, 4); book(_hist_pt_mu ,"pt-mu", 100, 0.0, 500); book(_hist_n_gamma ,"n-gamma", 11, -0.5, 10.5); book(_hist_phi_gamma ,"phi-gamma", 50, -PI, PI); 
book(_hist_eta_gamma ,"eta-gamma", 50, -4, 4); book(_hist_pt_gamma ,"pt-gamma", 100, 0.0, 500); book(_hist_n_gammaiso ,"n-gamma-iso", 11, -0.5, 10.5); book(_hist_phi_gammaiso ,"phi-gamma-iso", 50, -PI, PI); book(_hist_eta_gammaiso ,"eta-gamma-iso", 50, -4, 4); book(_hist_pt_gammaiso ,"pt-gamma-iso", 100, 0.0, 500); book(_hist_met ,"Etmiss", 100, 0.0, 1500); book(_hist_mll_ossf_ee ,"mll-ossf-ee", 50, 0.0, 500); book(_hist_mll_ossf_mumu ,"mll-ossf-mumu", 50, 0.0, 500); book(_hist_mll_osof_emu ,"mll-osof-emu", 50, 0.0, 500); book(_hist_mll_all_ossf_ee ,"mll-all-ossf-ee", 50, 0.0, 500); book(_hist_mll_all_ossf_mumu ,"mll-all-ossf-mumu", 50, 0.0, 500); book(_hist_mll_all_osof_emu ,"mll-all-osof-emu", 50, 0.0, 500); book(_hist_mll_2_ossf_ee ,"mll-2-ossf-ee", 50, 0.0, 500); book(_hist_mll_2_ossf_mumu ,"mll-2-ossf-mumu", 50, 0.0, 500); book(_hist_mll_2_osof_emu ,"mll-2-osof-emu", 50, 0.0, 500); /// @todo LSP eta, pT, phi, mass: no reliable cross-scenario LSP PID but /// maybe plot for all of chi^0_1, gravitino, sneutrino, gluino, ... or /// identify the LSP as any PID::isSUSY (?) particle with status = 1? } // Do the analysis void analyze(const Event& evt) { const FinalState& tracks = apply(evt, "Tracks"); if (tracks.particles().empty()) { MSG_DEBUG("Failed multiplicity cut"); vetoEvent; } // Fill track histos _hist_n_trk->fill(tracks.size()); for (const Particle& t : tracks.particles()) { const FourMomentum& p = t.momentum(); _hist_phi_trk->fill(mapAngleMPiToPi(p.phi())); _hist_eta_trk->fill(p.eta()); _hist_pt_trk->fill(p.pT()/GeV); } // Get jets and fill jet histos const FastJets& jetpro = apply(evt, "Jets"); const Jets jets = jetpro.jetsByPt(); MSG_DEBUG("Jet multiplicity = " << jets.size()); _hist_n_jet->fill(jets.size()); for (const Jet& j : jets) { const FourMomentum& pj = j.momentum(); _hist_phi_jet->fill(mapAngleMPiToPi(pj.phi())); _hist_eta_jet->fill(pj.eta()); _hist_pt_jet->fill(pj.pT()/GeV); } /// @todo Resum photons around electrons // Fill final state electron/positron histos const FinalState& efs = apply(evt, "Electrons"); _hist_n_e->fill(efs.size()); vector epluses, eminuses; for (const Particle& e : efs.particles()) { const FourMomentum& p = e.momentum(); _hist_phi_e->fill(mapAngleMPiToPi(p.phi())); _hist_eta_e->fill(p.eta()); _hist_pt_e->fill(p.pT()/GeV); // Add sufficiently hard leptons to collections for m_ll histo if (p.pT()/GeV > 20) { - if (PID::threeCharge(e.pid()) > 0) epluses += p; else eminuses += p; + if (PID::charge3(e.pid()) > 0) epluses += p; else eminuses += p; } } /// @todo Resum photons around muons // Fill final state muon/antimuon histos const FinalState& mufs = apply(evt, "Muons"); _hist_n_mu->fill(mufs.size()); vector mupluses, muminuses; for (const Particle& mu : mufs.particles()) { const FourMomentum& p = mu.momentum(); _hist_phi_mu->fill(mapAngleMPiToPi(p.phi())); _hist_eta_mu->fill(p.eta()); _hist_pt_mu->fill(p.pT()/GeV); // Add sufficiently hard leptons to collections for m_ll histo if (p.pT()/GeV > 20) { - if (PID::threeCharge(mu.pid()) > 0) mupluses += p; else muminuses += p; + if (PID::charge3(mu.pid()) > 0) mupluses += p; else muminuses += p; } } // Fill final state non-isolated photon histos const FinalState& allphotonfs = apply(evt, "AllPhotons"); _hist_n_gamma->fill(allphotonfs.size()); Particles isolatedphotons; for (const Particle& ph : allphotonfs.particles()) { const FourMomentum& p = ph.momentum(); _hist_phi_gamma->fill(mapAngleMPiToPi(p.phi())); _hist_eta_gamma->fill(p.eta()); _hist_pt_gamma->fill(p.pT()/GeV); // Select isolated photons bool 
isolated = true; for (const Jet& j : jets) { if (deltaR(j.momentum(), p) < 0.2) { isolated = false; break; } } if (isolated) isolatedphotons += ph; } // Fill final state isolated photon histos _hist_n_gammaiso->fill(isolatedphotons.size()); for (const Particle& ph_iso : isolatedphotons) { const FourMomentum& p = ph_iso.momentum(); _hist_phi_gammaiso->fill(mapAngleMPiToPi(p.phi())); _hist_eta_gammaiso->fill(p.eta()); _hist_pt_gammaiso->fill(p.pT()/GeV); } // Calculate and fill missing Et histos const MissingMomentum& met = apply(evt, "MET"); _hist_met->fill(met.vectorEt().mod()/GeV); // Choose highest-pT leptons of each sign and flavour for dilepton mass edges const FinalState& lpfs = apply(evt, "LeadingParticles"); bool eplus_ok(false), eminus_ok(false), muplus_ok(false), muminus_ok(false); FourMomentum peplus, peminus, pmuplus, pmuminus; for (const Particle& p : lpfs.particles()) { // Only use leptons above 20 GeV if (p.pT()/GeV < 20) continue; // Identify the PID const PdgId pid = p.pid(); if (pid == PID::ELECTRON) { eminus_ok = true; peminus = p.momentum(); } else if (pid == PID::POSITRON) { eplus_ok = true; peplus = p.momentum(); } else if (pid == PID::MUON) { muminus_ok = true; pmuminus = p.momentum(); } else if (pid == PID::ANTIMUON) { muplus_ok = true; pmuplus = p.momentum(); } else { throw Error("Unexpected particle type in leading particles FS!"); } } // m_ee if (eminus_ok && eplus_ok) { const double m_ee = FourMomentum(peplus + peminus).mass(); _hist_mll_ossf_ee->fill(m_ee/GeV); if (epluses.size() == 1 && eminuses.size() == 1) _hist_mll_2_ossf_ee->fill(m_ee/GeV); } // m_mumu if (muminus_ok && muplus_ok) { const double m_mumu = FourMomentum(pmuplus + pmuminus).mass(); _hist_mll_ossf_mumu->fill(m_mumu/GeV); if (mupluses.size() == 1 && muminuses.size() == 1) _hist_mll_2_ossf_mumu->fill(m_mumu/GeV); } // m_emu (both configurations) if (eminus_ok && muplus_ok) { const double m_emu = FourMomentum(pmuplus + peminus).mass(); _hist_mll_osof_emu->fill(m_emu/GeV); if (mupluses.size() == 1 && eminuses.size() == 1) _hist_mll_2_osof_emu->fill(m_emu/GeV); } if (muminus_ok && eplus_ok) { const double m_mue = FourMomentum(peplus + pmuminus).mass(); _hist_mll_osof_emu->fill(m_mue/GeV); if (epluses.size() == 1 && muminuses.size() == 1) _hist_mll_2_osof_emu->fill(m_mue/GeV); } // m_ll plots using *all* electrons, positrons, muons and antimuons // m_ee for (const FourMomentum& peplus : epluses) { for (const FourMomentum& peminus : eminuses) { const double m_ee = FourMomentum(peplus + peminus).mass(); _hist_mll_all_ossf_ee->fill(m_ee/GeV); } } // m_mumu for (const FourMomentum& pmuplus : mupluses) { for (const FourMomentum& pmuminus : muminuses) { const double m_mumu = FourMomentum(pmuplus + pmuminus).mass(); _hist_mll_all_ossf_mumu->fill(m_mumu/GeV); } } // m_emu (both configurations) for (const FourMomentum& pmuplus : mupluses) { for (const FourMomentum& peminus : eminuses) { const double m_emu = FourMomentum(pmuplus + peminus).mass(); _hist_mll_all_osof_emu->fill(m_emu/GeV); } } for (const FourMomentum& peplus : epluses) { for (const FourMomentum& pmuminus : muminuses) { const double m_mue = FourMomentum(peplus + pmuminus).mass(); _hist_mll_all_osof_emu->fill(m_mue/GeV); } } } void finalize() { /// @todo Normalisations } //@} private: Histo1DPtr _hist_n_trk, _hist_phi_trk, _hist_eta_trk, _hist_pt_trk; Histo1DPtr _hist_n_jet, _hist_phi_jet, _hist_eta_jet, _hist_pt_jet; Histo1DPtr _hist_n_e, _hist_phi_e, _hist_eta_e, _hist_pt_e; Histo1DPtr _hist_n_mu, _hist_phi_mu, _hist_eta_mu, _hist_pt_mu; 
Histo1DPtr _hist_n_gamma, _hist_phi_gamma, _hist_eta_gamma, _hist_pt_gamma; Histo1DPtr _hist_n_gammaiso, _hist_phi_gammaiso, _hist_eta_gammaiso, _hist_pt_gammaiso; Histo1DPtr _hist_met; Histo1DPtr _hist_mll_2_ossf_ee, _hist_mll_2_ossf_mumu, _hist_mll_2_osof_emu; Histo1DPtr _hist_mll_ossf_ee, _hist_mll_ossf_mumu, _hist_mll_osof_emu; Histo1DPtr _hist_mll_all_ossf_ee, _hist_mll_all_ossf_mumu, _hist_mll_all_osof_emu; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_SUSY); } diff --git a/analyses/pluginMC/MC_WINC.cc b/analyses/pluginMC/MC_WINC.cc --- a/analyses/pluginMC/MC_WINC.cc +++ b/analyses/pluginMC/MC_WINC.cc @@ -1,191 +1,191 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief MC validation analysis for inclusive W events class MC_WINC : public Analysis { public: /// Default constructor MC_WINC(string name="MC_WINC") : Analysis(name) { _dR=0.2; _lepton=PID::ELECTRON; } /// @name Analysis methods //@{ /// Book histograms void init() { FinalState fs; WFinder wfinder(fs, Cuts::abseta < 3.5 && Cuts::pT > 25*GeV, _lepton, 60.0*GeV, 100.0*GeV, 25.0*GeV, _dR); declare(wfinder, "WFinder"); double sqrts = sqrtS()>0. ? sqrtS() : 14000.; book(_h_W_mass ,"W_mass", 50, 55.0, 105.0); book(_h_W_pT ,"W_pT", logspace(100, 1.0, 0.5*sqrts)); book(_h_W_pT_peak ,"W_pT_peak", 25, 0.0, 125.0); book(_h_W_y ,"W_y", 40, -4.0, 4.0); book(_h_W_phi ,"W_phi", 25, 0.0, TWOPI); book(_h_Wplus_pT ,"Wplus_pT", logspace(25, 10.0, 0.5*sqrts)); book(_h_Wminus_pT ,"Wminus_pT", logspace(25, 10.0, 0.5*sqrts)); book(_h_lepton_pT ,"lepton_pT", logspace(100, 10.0, 0.25*sqrts)); book(_h_lepton_eta ,"lepton_eta", 40, -4.0, 4.0); book(_htmp_dsigminus_deta ,"lepton_dsigminus_deta", 20, 0.0, 4.0); book(_htmp_dsigplus_deta ,"lepton_dsigplus_deta", 20, 0.0, 4.0); book(_h_asym, "W_chargeasymm_eta"); book(_h_asym_pT, "W_chargeasymm_pT"); } /// Do the analysis void analyze(const Event & e) { const double weight = 1.0; const WFinder& wfinder = apply(e, "WFinder"); if (wfinder.bosons().size() != 1) { vetoEvent; } double charge3_x_eta = 0; int charge3 = 0; FourMomentum emom; FourMomentum wmom(wfinder.bosons().front().momentum()); _h_W_mass->fill(wmom.mass(), weight); _h_W_pT->fill(wmom.pT(), weight); _h_W_pT_peak->fill(wmom.pT(), weight); _h_W_y->fill(wmom.rapidity(), weight); _h_W_phi->fill(wmom.phi(), weight); Particle l=wfinder.constituentLeptons()[0]; _h_lepton_pT->fill(l.pT(), weight); _h_lepton_eta->fill(l.eta(), weight); - if (PID::threeCharge(l.pid()) != 0) { + if (PID::charge3(l.pid()) != 0) { emom = l.momentum(); - charge3_x_eta = PID::threeCharge(l.pid()) * emom.eta(); - charge3 = PID::threeCharge(l.pid()); + charge3_x_eta = PID::charge3(l.pid()) * emom.eta(); + charge3 = PID::charge3(l.pid()); } assert(charge3_x_eta != 0); assert(charge3!=0); if (emom.Et() > 30/GeV) { if (charge3_x_eta < 0) { _htmp_dsigminus_deta->fill(emom.eta(), weight); } else { _htmp_dsigplus_deta->fill(emom.eta(), weight); } } if (charge3 < 0) { _h_Wminus_pT->fill(wmom.pT(), weight); } else { _h_Wplus_pT->fill(wmom.pT(), weight); } } /// Finalize void finalize() { scale(_h_W_mass, crossSection()/sumOfWeights()); scale(_h_W_pT, crossSection()/sumOfWeights()); scale(_h_W_pT_peak, crossSection()/sumOfWeights()); scale(_h_W_y, crossSection()/sumOfWeights()); scale(_h_W_phi, crossSection()/sumOfWeights()); scale(_h_lepton_pT, crossSection()/sumOfWeights()); scale(_h_lepton_eta, crossSection()/sumOfWeights()); // Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) for 
each Et region divide(*_htmp_dsigplus_deta - *_htmp_dsigminus_deta, *_htmp_dsigplus_deta + *_htmp_dsigminus_deta, _h_asym); // // W charge asymmetry vs. pTW: dsig+/dpT / dsig-/dpT divide(_h_Wplus_pT, _h_Wminus_pT, _h_asym_pT); scale(_h_Wplus_pT, crossSection()/picobarn/sumOfWeights()); scale(_h_Wminus_pT, crossSection()/picobarn/sumOfWeights()); } //@} protected: /// @name Parameters for specialised e/mu and dressed/bare subclassing //@{ double _dR; PdgId _lepton; //@} private: /// @name Histograms //@{ Histo1DPtr _h_W_mass; Histo1DPtr _h_W_pT; Histo1DPtr _h_W_pT_peak; Histo1DPtr _h_W_y; Histo1DPtr _h_W_phi; Histo1DPtr _h_Wplus_pT; Histo1DPtr _h_Wminus_pT; Histo1DPtr _h_lepton_pT; Histo1DPtr _h_lepton_eta; Histo1DPtr _htmp_dsigminus_deta; Histo1DPtr _htmp_dsigplus_deta; Scatter2DPtr _h_asym; Scatter2DPtr _h_asym_pT; //@} }; struct MC_WINC_EL : public MC_WINC { MC_WINC_EL() : MC_WINC("MC_WINC_EL") { _dR = 0.2; _lepton = PID::ELECTRON; } }; struct MC_WINC_EL_BARE : public MC_WINC { MC_WINC_EL_BARE() : MC_WINC("MC_WINC_EL_BARE") { _dR = 0; _lepton = PID::ELECTRON; } }; struct MC_WINC_MU : public MC_WINC { MC_WINC_MU() : MC_WINC("MC_WINC_MU") { _dR = 0.2; _lepton = PID::MUON; } }; struct MC_WINC_MU_BARE : public MC_WINC { MC_WINC_MU_BARE() : MC_WINC("MC_WINC_MU_BARE") { _dR = 0; _lepton = PID::MUON; } }; // The hooks for the plugin system DECLARE_RIVET_PLUGIN(MC_WINC); DECLARE_RIVET_PLUGIN(MC_WINC_EL); DECLARE_RIVET_PLUGIN(MC_WINC_EL_BARE); DECLARE_RIVET_PLUGIN(MC_WINC_MU); DECLARE_RIVET_PLUGIN(MC_WINC_MU_BARE); } diff --git a/analyses/pluginMC/MC_WPOL.cc b/analyses/pluginMC/MC_WPOL.cc --- a/analyses/pluginMC/MC_WPOL.cc +++ b/analyses/pluginMC/MC_WPOL.cc @@ -1,155 +1,155 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief MC validation analysis for W polarisation class MC_WPOL : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor MC_WPOL() : Analysis("MC_WPOL") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs; WFinder wfinder(fs, Cuts::open(), PID::ELECTRON, 60.0*GeV, 100.0*GeV, 0.0*GeV, 0.0); declare(wfinder, "WFinder"); Beam beams; declare(beams, "Beams"); vector tags{"_wplus", "_wminus"}; _h_dists.resize(tags.size()); _h_histos.resize(tags.size()); for (size_t i=0; i0. ? 
sqrtS() : 14000.; book(_h_dists[i][0] ,"A0"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][1] ,"A1"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][2] ,"A2"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][3] ,"A3"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][4] ,"A4"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][5] ,"A5"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][6] ,"A6"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][7] ,"A7"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][8] ,"fL"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][9] ,"fR"+tags[i],logspace(100, 1.0, 0.5*sqrts)); book(_h_dists[i][10] ,"f0"+tags[i],logspace(100, 1.0, 0.5*sqrts)); _h_histos[i].resize(4,Histo1DPtr()); book(_h_histos[i][0] ,"thetastar"+tags[i],100,-1.0,1.0); book(_h_histos[i][1] ,"phistar"+tags[i],90,0.0,360.0); book(_h_histos[i][2] ,"thetastar_ptw20"+tags[i],100,-1.0,1.0); book(_h_histos[i][3] ,"phistar_ptw20"+tags[i],90,0.0,360.0); } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; const WFinder& wfinder = apply(event, "WFinder"); if (wfinder.bosons().size() != 1) { vetoEvent; } const ParticlePair& beams = apply(event, "Beams").beams(); FourMomentum pb1(beams.second.momentum()), pb2(beams.first.momentum()); Particle lepton = wfinder.constituentLeptons()[0]; FourMomentum pl(lepton.momentum()); - size_t idx = (PID::threeCharge(lepton.pid())>0 ? 0 : 1); + size_t idx = (PID::charge3(lepton.pid())>0 ? 0 : 1); FourMomentum plnu(wfinder.bosons()[0].momentum()); const LorentzTransform cms = LorentzTransform::mkFrameTransformFromBeta(plnu.betaVec()); Matrix3 zrot(plnu.p3(), Vector3(0.0, 0.0, 1.0)); pl=cms.transform(pl); pb1=cms.transform(pb1); pb2=cms.transform(pb2); Vector3 pl3=pl.p3(); Vector3 pb13=pb1.p3(); Vector3 pb23=pb2.p3(); pl3=zrot*pl3; pb13=zrot*pb13; pb23=zrot*pb23; Vector3 xref(cos(pb13.theta())>cos(pb23.theta())?pb13:pb23); Matrix3 xrot(Vector3(xref.x(), xref.y(), 0.0), Vector3(1.0, 0.0, 0.0)); pl3=xrot*pl3; double ptw(wfinder.bosons()[0].pT()/GeV); double thetas(pl3.theta()), phis(pl3.phi()); double costhetas(cos(thetas)), sinthetas(sin(thetas)); double cosphis(cos(phis)), sinphis(sin(phis)); if (phis<0.0) phis+=2.0*M_PI; _h_histos[idx][0]->fill(costhetas,weight); _h_histos[idx][1]->fill(phis*180.0/M_PI,weight); if (ptw>20.0) { _h_histos[idx][2]->fill(costhetas,weight); _h_histos[idx][3]->fill(phis*180.0/M_PI,weight); } _h_dists[idx][0]->fill(ptw,10.0/3.0*(1.0-3.0*sqr(costhetas))+2.0/3.0,weight); _h_dists[idx][1]->fill(ptw,10.0*sinthetas*costhetas*cosphis,weight); _h_dists[idx][2]->fill(ptw,10.0*sqr(sinthetas)*(sqr(cosphis)-sqr(sinphis)),weight); _h_dists[idx][3]->fill(ptw,4.0*sinthetas*cosphis,weight); _h_dists[idx][4]->fill(ptw,4.0*costhetas,weight); _h_dists[idx][5]->fill(ptw,4.0*sinthetas*sinphis,weight); _h_dists[idx][6]->fill(ptw,10.0*costhetas*sinthetas*sinphis,weight); _h_dists[idx][7]->fill(ptw,10.0*sqr(sinthetas)*cosphis*sinphis,weight); _h_dists[idx][8]->fill(ptw,0.5*sqr(1.0-costhetas)-(1.0-2.0*sqr(costhetas)),weight); _h_dists[idx][9]->fill(ptw,0.5*sqr(1.0+costhetas)-(1.0-2.0*sqr(costhetas)),weight); _h_dists[idx][10]->fill(ptw,5.0*sqr(sinthetas)-3.0,weight); } /// Normalise histograms etc., after the run void finalize() { for (size_t i=0; i<_h_histos.size(); ++i) { for (Histo1DPtr histo : _h_histos[i]) { scale(histo, crossSection()/picobarn/sumOfWeights()); } } } //@} private: /// @name Histograms //@{ vector > _h_dists; vector > _h_histos; //@} }; // 
The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_WPOL); } diff --git a/analyses/pluginMisc/PDG_TAUS.cc b/analyses/pluginMisc/PDG_TAUS.cc --- a/analyses/pluginMisc/PDG_TAUS.cc +++ b/analyses/pluginMisc/PDG_TAUS.cc @@ -1,212 +1,212 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/TauFinder.hh" namespace Rivet { class PDG_TAUS : public Analysis { public: /// Constructor PDG_TAUS() : Analysis("PDG_TAUS") { } /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { TauFinder tauleptonic(TauFinder::DecayMode::LEPTONIC); // open cuts, leptonic decays declare(tauleptonic, "TauLeptonic"); TauFinder tauhadronic(TauFinder::DecayMode::HADRONIC); // open cuts, hadronic decays declare(tauhadronic, "TauHadronic"); populateDecayMap(); book(_h_ratio_mu ,1, 1, 1); book(_h_ratio_el ,1, 1, 2); book(_h_1prong_pinu ,2, 1, 1); book(_h_1prong_Kpnu ,2, 1, 2); book(_h_1prong_pipinu ,2, 1, 3); book(_h_1prong_Kppinu ,2, 1, 4); book(_h_1prong_pipipinu ,2, 1, 5); book(_h_1prong_Knpinu ,2, 1, 6); book(_h_3prong_pipipinu ,2, 2, 1); book(_h_5prong ,2, 3, 1); book(_weights_had, "TMP/weights_had"); book(_weights_mu, "TMP/weights_mu"); book(_weights_el, "TMP/weights_el"); } /// Perform the per-event analysis void analyze(const Event& e) { const TauFinder& taulep = apply(e, "TauLeptonic"); const TauFinder& tauhad = apply(e, "TauHadronic"); // Hadronic tau decays --- prong decays for(const Particle& tau : tauhad.taus()) { _weights_had->fill(); int prongs = countProngs(tau); // number of charged particles among decay products // Only do 1 prong decays here if (prongs == 1) { ////// Exclusive decay modes "1-prong" if (analyzeDecay(tau, decay_pids["pinu"], true)) _h_1prong_pinu->fill(1); if (analyzeDecay(tau, decay_pids["Kpnu"], true)) _h_1prong_Kpnu->fill(1); if (analyzeDecay(tau, decay_pids["pipinu"], true)) _h_1prong_pipinu->fill(1); if (analyzeDecay(tau, decay_pids["Kppinu"] , true)) _h_1prong_Kppinu->fill(1); if (analyzeDecay(tau, decay_pids["pipipinu"], true)) _h_1prong_pipipinu->fill(1); // Kshort, Klong --- (twice) filling the K0 labelled PDG histo if (analyzeDecay(tau, decay_pids["KSpinu"] , true)) _h_1prong_Knpinu->fill(1); if (analyzeDecay(tau, decay_pids["KLpinu"] , true)) _h_1prong_Knpinu->fill(1); } else if (prongs == 3) { if (analyzeDecay(tau, decay_pids["3pipipinu"], true)) _h_3prong_pipipinu->fill(1); } else if (prongs == 5 && !any(tau.children(), HasAbsPID(310))) _h_5prong->fill(1); } // Leptonic tau decays --- look for radiative and non-radiative 1 prong decays for(const Particle& tau : taulep.taus()) { int prongs = countProngs(tau); // number of charged particles among decay products // Only do 1 prong decays here if (prongs == 1) { analyzeRadiativeDecay(tau, decay_pids["muids"], _weights_mu, true, _h_ratio_mu); analyzeRadiativeDecay(tau, decay_pids["elids"], _weights_el, true, _h_ratio_el); } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_ratio_mu, 1. / *_weights_mu); scale(_h_ratio_el, 1. 
/ *_weights_el); const YODA::Counter norm = *_weights_had + *_weights_mu + *_weights_el; scale(_h_1prong_pinu, 1./norm); scale(_h_1prong_Kpnu, 1./norm); scale(_h_1prong_pipinu, 1./norm); scale(_h_1prong_Kppinu, 1./norm); scale(_h_1prong_pipipinu, 1./norm); scale(_h_1prong_Knpinu, 1./norm); scale(_h_3prong_pipipinu, 1./norm); scale(_h_5prong, 1./norm); } // Short hand bool contains(Particle& mother, int id, bool abs=false) { if (abs) return any(mother.children(), HasAbsPID(id)); return any(mother.children(), HasPID(id)); } // Count charged decay products int countProngs(Particle mother) { int n_prongs = 0; for(Particle p : mother.children()) - if (p.threeCharge()!=0) ++n_prongs; + if (p.charge3()!=0) ++n_prongs; return n_prongs; } // Set up a lookup table for decays void populateDecayMap() { decay_pids["muids"] = {{ 13,14,16 }}; decay_pids["elids"] = {{ 11,12,16 }}; decay_pids["pinu"] = {{ 211,16 }}; decay_pids["Kpnu"] = {{ 321,16 }}; decay_pids["pipinu"] = {{ 111,211,16 }}; decay_pids["Kppinu"] = {{ 111,321,16 }}; decay_pids["pipipinu"] = {{ 111,111,211,16 }}; decay_pids["KSpinu"] = {{ 211,310,16 }}; decay_pids["KLpinu"] = {{ 211,130,16 }}; decay_pids["3pipipinu"] = {{ 211,211,211,16 }}; } bool analyzeDecay(Particle mother, vector ids, bool absolute) { // There is no point in looking for decays with less particles than to be analysed if (mother.children().size() == ids.size()) { bool decayfound = true; for (int id : ids) { if (!contains(mother, id, absolute)) decayfound = false; } return decayfound; } // end of first if return false; } // Look for radiative (and non-radiative) tau decays to fill a ratio histo void analyzeRadiativeDecay(Particle mother, vector ids, CounterPtr &w_incl, bool absolute, Histo1DPtr h_ratio) { // w_incl ... reference to a global weight counter for all leptonic tau decays // h_ratio ... pointer to ratio histo // There is no point in looking for decays with less particles than to be analysed if (mother.children().size() >= ids.size()) { bool decayfound = true; for (int id : ids) { if (!contains(mother, id, absolute)) decayfound = false; } // Do not increment counters if the specified decay products were not found if (decayfound) { w_incl->fill(); // the (global) weight counter for leptonic decays bool radiative = any(mother.children(), HasPID(PID::PHOTON)); // Only fill the histo if there is a radiative decay if (radiative) { // Iterate over decay products to find photon with 5 MeV energy for (const Particle& son : mother.children()) { if (son.pid() == PID::PHOTON) { // Require photons to have at least 5 MeV energy in the rest frame of the tau // boosted taus if (!mother.momentum().betaVec().isZero()) { LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mother.momentum().betaVec()); if (cms_boost.transform(son.momentum())[0]/MeV > 5.) { h_ratio->fill(1); break; } } // not boosted taus else { if (son.momentum()[0]/MeV > 5.) 
{ h_ratio->fill(1); break; } } } } // end loop over decay products } // end of radiative } // end of decayfound } // end of first if } private: /// @name Histograms //@{ Histo1DPtr _h_ratio_mu, _h_ratio_el; Histo1DPtr _h_1prong_pinu, _h_1prong_Kpnu, _h_1prong_Kppinu, _h_1prong_pipinu, _h_1prong_pipipinu, _h_1prong_Knpinu; Histo1DPtr _h_3prong_pipipinu; Histo1DPtr _h_5prong; //@} CounterPtr _weights_had, _weights_mu, _weights_el; map > decay_pids; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(PDG_TAUS); } diff --git a/include/Rivet/Particle.hh b/include/Rivet/Particle.hh --- a/include/Rivet/Particle.hh +++ b/include/Rivet/Particle.hh @@ -1,707 +1,703 @@ // -*- C++ -*- #ifndef RIVET_Particle_HH #define RIVET_Particle_HH #include "Rivet/Particle.fhh" #include "Rivet/ParticleBase.hh" #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/Utils.hh" #include "Rivet/Math/LorentzTrans.hh" // NOTE: Rivet/Tools/ParticleUtils.hh included at the end #include "fastjet/PseudoJet.hh" namespace Rivet { /// Particle representation, either from a HepMC::GenEvent or reconstructed. class Particle : public ParticleBase { public: /// @name Constructors //@{ /// Default constructor. /// @note A particle without info is useless. This only exists to keep STL containers happy. Particle() : ParticleBase(), _original(nullptr), _id(PID::ANY) { } /// Constructor without GenParticle. Particle(PdgId pid, const FourMomentum& mom, const FourVector& pos=FourVector()) : ParticleBase(), _original(nullptr), _id(pid), _momentum(mom), _origin(pos) { } /// Constructor from a HepMC GenParticle pointer. Particle(const GenParticle* gp) : ParticleBase(), _original(gp), _id(gp->pdg_id()), _momentum(gp->momentum()) { const GenVertex* vprod = gp->production_vertex(); if (vprod != nullptr) { setOrigin(vprod->position().t(), vprod->position().x(), vprod->position().y(), vprod->position().z()); } } /// Constructor from a HepMC GenParticle. Particle(const GenParticle& gp) : ParticleBase(), _original(&gp), _id(gp.pdg_id()), _momentum(gp.momentum()) { const GenVertex* vprod = gp.production_vertex(); if (vprod != nullptr) { setOrigin(vprod->position().t(), vprod->position().x(), vprod->position().y(), vprod->position().z()); } } //@} /// @name Kinematic properties //@{ /// The momentum. const FourMomentum& momentum() const { return _momentum; } /// Set the momentum. Particle& setMomentum(const FourMomentum& momentum) { _momentum = momentum; return *this; } /// Set the momentum via components. Particle& setMomentum(double E, double px, double py, double pz) { _momentum = FourMomentum(E, px, py, pz); return *this; } /// Apply an active Lorentz transform to this particle Particle& transformBy(const LorentzTransform& lt); //@ /// @name Positional properties //@{ /// The origin position. const FourVector& origin() const { return _origin; } /// Set the origin position. Particle& setOrigin(const FourVector& position) { _origin = position; return *this; } /// Set the origin position via components. 
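// Illustrative sketch (not part of this patch): the Particle constructor, the
// kinematic setters and transformBy() documented in this block, combined with a
// frame transform built the same way as in the analyses earlier in this diff.
// The helper name and the pion four-momentum values are invented for illustration.
inline Particle examplePionInRestFrameOf(const FourMomentum& ref) {
  Particle pi(211, FourMomentum(1.0*GeV, 0.0, 0.0, 0.99*GeV)); // (E, px, py, pz)
  pi.setOrigin(0.0, 0.0, 0.0, 0.0);                            // produced at the origin
  const LorentzTransform toRest = LorentzTransform::mkFrameTransformFromBeta(ref.betaVec());
  return pi.transformBy(toRest);                               // transformBy returns *this
}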
Particle& setOrigin(double t, double x, double y, double z) { _origin = FourMomentum(t, x, y, z); return *this; } //@} /// @name Other representations and implicit casts to momentum-like objects //@{ /// Converter to FastJet3 PseudoJet virtual fastjet::PseudoJet pseudojet() const { return fastjet::PseudoJet(mom().px(), mom().py(), mom().pz(), mom().E()); } /// Cast operator to FastJet3 PseudoJet operator PseudoJet () const { return pseudojet(); } /// Get a const pointer to the original GenParticle const GenParticle* genParticle() const { return _original; } /// Cast operator for conversion to GenParticle* operator const GenParticle* () const { return genParticle(); } //@} /// @name Particle ID code accessors //@{ /// This Particle's PDG ID code. PdgId pid() const { return _id; } /// Absolute value of the PDG ID code. PdgId abspid() const { return std::abs(_id); } /// This Particle's PDG ID code (alias). /// @deprecated Prefer the pid/abspid form PdgId pdgId() const { return _id; } //@} /// @name Charge //@{ /// The charge of this Particle. double charge() const { return PID::charge(pid()); } /// The absolute charge of this Particle. double abscharge() const { return PID::abscharge(pid()); } /// Three times the charge of this Particle (i.e. integer multiple of smallest quark charge). int charge3() const { return PID::charge3(pid()); } - /// Alias for charge3 - /// @deprecated Use charge3 - int threeCharge() const { return PID::threeCharge(pid()); } - /// Three times the absolute charge of this Particle (i.e. integer multiple of smallest quark charge). int abscharge3() const { return PID::abscharge3(pid()); } /// Is this Particle charged? bool isCharged() const { return charge3() != 0; } //@} /// @name Particle species //@{ /// Is this a hadron? bool isHadron() const { return PID::isHadron(pid()); } /// Is this a meson? bool isMeson() const { return PID::isMeson(pid()); } /// Is this a baryon? bool isBaryon() const { return PID::isBaryon(pid()); } /// Is this a lepton? bool isLepton() const { return PID::isLepton(pid()); } /// Is this a charged lepton? bool isChargedLepton() const { return PID::isChargedLepton(pid()); } /// Is this a neutrino? bool isNeutrino() const { return PID::isNeutrino(pid()); } /// Does this (hadron) contain a b quark? bool hasBottom() const { return PID::hasBottom(pid()); } /// Does this (hadron) contain a c quark? bool hasCharm() const { return PID::hasCharm(pid()); } // /// Does this (hadron) contain an s quark? // bool hasStrange() const { return PID::hasStrange(pid()); } /// Is this particle potentially visible in a detector? bool isVisible() const; //@} /// @name Constituents (for composite particles) //@{ /// Set direct constituents of this particle virtual void setConstituents(const Particles& cs, bool setmom=false); /// Add a single direct constituent to this particle virtual void addConstituent(const Particle& c, bool addmom=false); /// Add direct constituents to this particle virtual void addConstituents(const Particles& cs, bool addmom=false); /// Determine if this Particle is a composite of other Rivet Particles bool isComposite() const { return !constituents().empty(); } /// @brief Direct constituents of this particle, returned by reference /// /// The returned vector will be empty if this particle is non-composite, /// and its entries may themselves be composites. 
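// Illustrative sketch (not part of this patch): the renamed charge accessors
// that this diff switches every analysis to. Particle::charge3() replaces the
// removed threeCharge() alias, and PID::charge3(pid) is the equivalent free
// function. The helper name is invented for illustration.
inline int countChargedHadrons(const Particles& parts) {
  int n = 0;
  for (const Particle& p : parts) {
    if (!p.isHadron()) continue;
    if (p.charge3() != 0) ++n;                    // was p.threeCharge() before this patch
    // equivalently: if (PID::charge3(p.pid()) != 0) ++n;
  }
  return n;
}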
const Particles& constituents() const { return _constituents; } /// @brief Direct constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the sorting const Particles constituents(const ParticleSorter& sorter) const { return sortBy(constituents(), sorter); } /// @brief Direct constituents of this particle, filtered by a Cut /// @note Returns a copy, thanks to the filtering const Particles constituents(const Cut& c) const { return filter_select(constituents(), c); } /// @brief Direct constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the filtering and sorting const Particles constituents(const Cut& c, const ParticleSorter& sorter) const { return sortBy(constituents(c), sorter); } /// @brief Direct constituents of this particle, filtered by a selection functor /// @note Returns a copy, thanks to the filtering const Particles constituents(const ParticleSelector& selector) const { return filter_select(constituents(), selector); } /// @brief Direct constituents of this particle, filtered and sorted by functors /// @note Returns a copy, thanks to the filtering and sorting const Particles constituents(const ParticleSelector& selector, const ParticleSorter& sorter) const { return sortBy(constituents(selector), sorter); } /// @brief Fundamental constituents of this particle /// @note Returns {{*this}} if this particle is non-composite. Particles rawConstituents() const; /// @brief Fundamental constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the sorting const Particles rawConstituents(const ParticleSorter& sorter) const { return sortBy(rawConstituents(), sorter); } /// @brief Fundamental constituents of this particle, filtered by a Cut /// @note Returns a copy, thanks to the filtering const Particles rawConstituents(const Cut& c) const { return filter_select(rawConstituents(), c); } /// @brief Fundamental constituents of this particle, sorted by a functor /// @note Returns a copy, thanks to the filtering and sorting const Particles rawConstituents(const Cut& c, const ParticleSorter& sorter) const { return sortBy(rawConstituents(c), sorter); } /// @brief Fundamental constituents of this particle, filtered by a selection functor /// @note Returns a copy, thanks to the filtering const Particles rawConstituents(const ParticleSelector& selector) const { return filter_select(rawConstituents(), selector); } /// @brief Fundamental constituents of this particle, filtered and sorted by functors /// @note Returns a copy, thanks to the filtering and sorting const Particles rawConstituents(const ParticleSelector& selector, const ParticleSorter& sorter) const { return sortBy(rawConstituents(selector), sorter); } //@} /// @name Ancestry (for fundamental particles with a HepMC link) //@{ /// Get a list of the direct parents of the current particle (with optional selection Cut) /// /// @note This is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! Particles parents(const Cut& c=Cuts::OPEN) const; /// Get a list of the direct parents of the current particle (with selector function) /// /// @note This is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
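// Illustrative sketch (not part of this patch): the filtered and sorted
// constituents() overloads documented above, applied to a composite particle
// such as a reconstructed boson. The 0.5 GeV cut and helper name are arbitrary
// example choices.
inline Particles hardConstituentsByPt(const Particle& composite) {
  if (!composite.isComposite()) return Particles();
  return composite.constituents(Cuts::pT > 0.5*GeV,
                                [](const Particle& a, const Particle& b){ return a.pT() > b.pT(); });
}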
Particles parents(const ParticleSelector& f) const { return filter_select(parents(), f); } /// Check whether any particle in the particle's parent list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWith(const ParticleSelector& f) const { return !parents(f).empty(); } /// Check whether any particle in the particle's parent list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWith(const Cut& c) const; /// Check whether any particle in the particle's parent list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWithout(const ParticleSelector& f) const { return hasParentWith([&](const Particle& p){ return !f(p); }); } /// Check whether any particle in the particle's parent list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasParentWithout(const Cut& c) const; /// Check whether a given PID is found in the particle's parent list /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// /// @deprecated Prefer e.g. hasParentWith(Cut::pid == 123) bool hasParent(PdgId pid) const; /// Get a list of the ancestors of the current particle (with optional selection Cut) /// /// @note By default only physical ancestors, with status=2, are returned. /// /// @note This is valid in MC, but may not be answerable experimentally -- /// use this function with care when replicating experimental analyses! Particles ancestors(const Cut& c=Cuts::OPEN, bool only_physical=true) const; /// Get a list of the direct parents of the current particle (with selector function) /// /// @note By default only physical ancestors, with status=2, are returned. /// /// @note This is valid in MC, but may not be answerable experimentally -- /// use this function with care when replicating experimental analyses! Particles ancestors(const ParticleSelector& f, bool only_physical=true) const { return filter_select(ancestors(Cuts::OPEN, only_physical), f); } /// Check whether any particle in the particle's ancestor list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWith(const ParticleSelector& f, bool only_physical=true) const { return !ancestors(f, only_physical).empty(); } /// Check whether any particle in the particle's ancestor list has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
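// Illustrative sketch (not part of this patch): the parent/ancestor queries
// documented above. The PDG code 511 (B0) is an arbitrary example, and the
// Cut-based form follows the Cuts::pid usage suggested in these doc comments.
inline bool hasB0ParentOrBottomAncestor(const Particle& p) {
  if (p.hasParentWith(Cuts::pid == 511)) return true;                        // direct parent is a B0
  return p.hasAncestorWith([](const Particle& a){ return a.hasBottom(); });  // any b-hadron upstream
}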
bool hasAncestorWith(const Cut& c, bool only_physical=true) const; /// Check whether any particle in the particle's ancestor list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWithout(const ParticleSelector& f, bool only_physical=true) const { return hasAncestorWith([&](const Particle& p){ return !f(p); }, only_physical); } /// Check whether any particle in the particle's ancestor list does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasAncestorWithout(const Cut& c, bool only_physical=true) const; /// Check whether a given PID is found in the particle's ancestor list /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// /// @deprecated Prefer hasAncestorWith(Cuts::pid == pid) etc. bool hasAncestor(PdgId pid, bool only_physical=true) const; /// @brief Determine whether the particle is from a b-hadron decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromBottom() const; /// @brief Determine whether the particle is from a c-hadron decay /// /// @note If a hadron contains b and c quarks it is considered a bottom /// hadron and NOT a charm hadron. /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromCharm() const; // /// @brief Determine whether the particle is from a s-hadron decay // /// // /// @note If a hadron contains b or c quarks as well as strange it is // /// considered a b or c hadron, but NOT a strange hadron. // /// // /// @note This question is valid in MC, but may not be perfectly answerable // /// experimentally -- use this function with care when replicating // /// experimental analyses! // bool fromStrange() const; /// @brief Determine whether the particle is from a hadron decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromHadron() const; /// @brief Determine whether the particle is from a tau decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromTau(bool prompt_taus_only=false) const; /// @brief Determine whether the particle is from a prompt tau decay /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool fromPromptTau() const { return fromTau(true); } /// @brief Determine whether the particle is from a tau which decayed hadronically /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
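// Illustrative sketch (not part of this patch): the provenance helpers above,
// used to classify a lepton by origin; the selection logic is purely an example.
inline bool isNonHeavyFlavourLepton(const Particle& lep) {
  if (!lep.isChargedLepton()) return false;
  if (lep.fromBottom() || lep.fromCharm()) return false;  // reject heavy-flavour decay leptons
  return !lep.fromHadron();                               // keep leptons not from any hadron decay
}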
bool fromHadronicTau(bool prompt_taus_only=false) const; /// @brief Determine whether the particle is from a hadron or tau decay /// /// Specifically, walk up the ancestor chain until a status 2 hadron or /// tau is found, if at all. /// /// @note This question is valid in MC, but may not be perfectly answerable /// experimentally -- use this function with care when replicating /// experimental analyses! /// /// @deprecated Too vague: use fromHadron or fromHadronicTau bool fromDecay() const { return fromHadron() || fromPromptTau(); } /// @brief Shorthand definition of 'promptness' based on set definition flags /// /// A "direct" particle is one directly connected to the hard process. It is a /// preferred alias for "prompt", since it has no confusing implications about /// distinguishability by timing information. /// /// The boolean arguments allow a decay lepton to be considered direct if /// its parent was a "real" direct lepton. /// /// @note This one doesn't make any judgements about final-stateness bool isDirect(bool allow_from_direct_tau=false, bool allow_from_direct_mu=false) const; /// Alias for isDirect bool isPrompt(bool allow_from_prompt_tau=false, bool allow_from_prompt_mu=false) const { return isDirect(allow_from_prompt_tau, allow_from_prompt_mu); } //@} /// @name Decay info //@{ /// Whether this particle is stable according to the generator bool isStable() const; /// @todo isDecayed? How to restrict to physical particles? /// Get a list of the direct descendants from the current particle (with optional selection Cut) Particles children(const Cut& c=Cuts::OPEN) const; /// Get a list of the direct descendants from the current particle (with selector function) Particles children(const ParticleSelector& f) const { return filter_select(children(), f); } /// Check whether any direct child of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWith(const ParticleSelector& f) const { return !children(f).empty(); } /// Check whether any direct child of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWith(const Cut& c) const; /// Check whether any direct child of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasChildWithout(const ParticleSelector& f) const { return hasChildWith([&](const Particle& p){ return !f(p); }); } /// Check whether any direct child of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
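// Illustrative sketch (not part of this patch): isPrompt() and the child-query
// API documented above; the muon use case and helper names are invented examples.
inline Particles promptMuons(const Particles& parts) {
  Particles out;
  for (const Particle& p : parts) {
    if (p.abspid() != PID::MUON) continue;
    if (p.isPrompt(true)) out += p;   // 'true' also accepts muons from prompt tau decays
  }
  return out;
}
inline bool hasRadiatedPhotonChild(const Particle& lep) {
  return lep.hasChildWith(Cuts::pid == PID::PHOTON);   // any direct FSR photon child?
}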
bool hasChildWithout(const Cut& c) const; /// Get a list of all the descendants from the current particle (with optional selection Cut) Particles allDescendants(const Cut& c=Cuts::OPEN, bool remove_duplicates=true) const; /// Get a list of all the descendants from the current particle (with selector function) Particles allDescendants(const ParticleSelector& f, bool remove_duplicates=true) const { return filter_select(allDescendants(Cuts::OPEN, remove_duplicates), f); } /// Check whether any descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWith(const ParticleSelector& f, bool remove_duplicates=true) const { return !allDescendants(f, remove_duplicates).empty(); } /// Check whether any descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWith(const Cut& c, bool remove_duplicates=true) const; /// Check whether any descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWithout(const ParticleSelector& f, bool remove_duplicates=true) const { return hasDescendantWith([&](const Particle& p){ return !f(p); }, remove_duplicates); } /// Check whether any descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasDescendantWithout(const Cut& c, bool remove_duplicates=true) const; /// Get a list of all the stable descendants from the current particle (with optional selection Cut) /// /// @todo Use recursion through replica-avoiding MCUtils functions to avoid bookkeeping duplicates /// @todo Insist that the current particle is post-hadronization, otherwise throw an exception? Particles stableDescendants(const Cut& c=Cuts::OPEN) const; /// Get a list of all the stable descendants from the current particle (with selector function) Particles stableDescendants(const ParticleSelector& f) const { return filter_select(stableDescendants(), f); } /// Check whether any stable descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWith(const ParticleSelector& f) const { return !stableDescendants(f).empty(); } /// Check whether any stable descendant of this particle has the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWith(const Cut& c) const; /// Check whether any stable descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! 
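// Illustrative sketch (not part of this patch): stableDescendants() with a Cut,
// similar in spirit to the charged-prong counting in PDG_TAUS earlier in this
// diff (which counts direct children only). The helper name is invented.
inline int countChargedStableDescendants(const Particle& tau) {
  int nprongs = 0;
  for (const Particle& d : tau.stableDescendants(Cuts::OPEN)) {
    if (d.isCharged()) ++nprongs;
  }
  return nprongs;
}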
bool hasStableDescendantWithout(const ParticleSelector& f) const { return hasStableDescendantWith([&](const Particle& p){ return !f(p); }); } /// Check whether any stable descendant of this particle does not have the requested property /// /// @note This question is valid in MC, but may not be answerable /// experimentally -- use this function with care when replicating /// experimental analyses! bool hasStableDescendantWithout(const Cut& c) const; /// Flight length (divide by mm or cm to get the appropriate units) double flightLength() const; //@} /// @name Duplicate testing //@{ /// @brief Determine whether a particle is the first in a decay chain to meet the function requirement inline bool isFirstWith(const ParticleSelector& f) const { if (!f(*this)) return false; //< This doesn't even meet f, let alone being the last to do so if (any(parents(), f)) return false; //< If a direct parent has this property, this isn't the first return true; } /// @brief Determine whether a particle is the first in a decay chain not to meet the function requirement inline bool isFirstWithout(const ParticleSelector& f) const { return isFirstWith([&](const Particle& p){ return !f(p); }); } /// @brief Determine whether a particle is the last in a decay chain to meet the function requirement inline bool isLastWith(const ParticleSelector& f) const { if (!f(*this)) return false; //< This doesn't even meet f, let alone being the last to do so if (any(children(), f)) return false; //< If a child has this property, this isn't the last return true; } /// @brief Determine whether a particle is the last in a decay chain not to meet the function requirement inline bool isLastWithout(const ParticleSelector& f) const { return isLastWith([&](const Particle& p){ return !f(p); }); } //@} protected: /// A pointer to the original GenParticle from which this Particle is projected (may be null) const GenParticle* _original; /// Constituent particles if this is a composite (may be empty) Particles _constituents; /// The PDG ID code for this Particle. PdgId _id; /// The momentum of this particle. FourMomentum _momentum; /// The creation position of this particle. FourVector _origin; }; /// @name String representation and streaming support //@{ /// Allow a Particle to be passed to an ostream. std::ostream& operator << (std::ostream& os, const Particle& p); /// Allow ParticlePair to be passed to an ostream. std::ostream& operator << (std::ostream& os, const ParticlePair& pp); //@} } #include "Rivet/Tools/ParticleUtils.hh" #endif diff --git a/include/Rivet/Tools/ParticleIdUtils.hh b/include/Rivet/Tools/ParticleIdUtils.hh --- a/include/Rivet/Tools/ParticleIdUtils.hh +++ b/include/Rivet/Tools/ParticleIdUtils.hh @@ -1,797 +1,793 @@ // -*- C++ -*- // // This file is part of MCUtils -- https://bitbucket.org/andybuckley/mcutils // Copyright (C) 2013-2016 Andy Buckley // // Embedding of MCUtils code in other projects is permitted provided this // notice is retained and the MCUtils namespace and include path are changed. // #ifndef RIVET_PARTICLEIDUTILS_HH #define RIVET_PARTICLEIDUTILS_HH /// @file Utility functions for querying PDG ID codes (many from HepPID) /// @author Andy Buckley #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Math/MathUtils.hh" namespace Rivet { namespace PID { /// @name Utility functions //@{ /// Absolute value /// @deprecated Just use abs()! inline int abspid(int pid) { return abs(pid); } /// PID digits (base 10) are: n nr nl nq1 nq2 nq3 nj /// The Location enum provides a convenient index into the PID. 
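// Illustrative sketch (not part of this patch): the isLastWith() helper above,
// used in the common way to keep only the final b-hadron of each decay chain
// and so avoid double counting replicas along the chain.
inline Particles lastBHadrons(const Particles& parts) {
  Particles out;
  for (const Particle& p : parts) {
    if (!p.isHadron() || !p.hasBottom()) continue;
    if (p.isLastWith([](const Particle& q){ return q.hasBottom(); })) out += p;
  }
  return out;
}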
enum Location { nj=1, nq3, nq2, nq1, nl, nr, n, n8, n9, n10 }; /// Split the PID into constituent integers inline unsigned short _digit(Location loc, int pid) { // PID digits (base 10) are: n nr nl nq1 nq2 nq3 nj (cf. Location) int numerator = (int) std::pow(10.0, (loc-1)); return (abs(pid)/numerator) % 10; } /// Returns everything beyond the 7th digit (e.g. outside the numbering scheme) inline int _extraBits(int pid) { return abs(pid)/10000000; } /// @brief Return the first two digits if this is a "fundamental" particle /// @note ID = 100 is a special case (internal generator ID's are 81-100) inline int _fundamentalID(int pid) { if (_extraBits(pid) > 0) return 0; if (_digit(nq2,pid) == 0 && _digit(nq1,pid) == 0) { return abs(pid) % 10000; } else if (abs(pid) <= 100) { return abs(pid); } else { return 0; } } //@} /// @name Nucleus/ion functions //@{ /// @brief Is this a nucleus PID? /// /// This implements the 2006 Monte Carlo nuclear code scheme. /// Ion numbers are +/- 10LZZZAAAI. /// AAA is A - total baryon number /// ZZZ is Z - total charge /// L is the total number of strange quarks. /// I is the isomer number, with I=0 corresponding to the ground state. inline bool isNucleus(int pid) { // a proton can also be a Hydrogen nucleus if (abs(pid) == 2212) return true; // new standard: +/- 10LZZZAAAI if ((_digit(n10,pid) == 1) && (_digit(n9,pid) == 0)) { // charge should always be less than or equal to baryon number // the following line is A >= Z if ((abs(pid)/10)%1000 >= (abs(pid)/10000)%1000) return true; } return false; } /// Get the atomic number (number of protons) in a nucleus/ion /// @note Ion numbers are +/- 10LZZZAAAI. inline int nuclZ(int pid) { // A proton can also be a Hydrogen nucleus if (abs(pid) == 2212) { return 1; } if (isNucleus(pid)) return (abs(pid)/10000) % 1000; return 0; } /// Alias for nuclZ /// @deprecated Use nuclZ inline int Z(int pid) { return nuclZ(pid); } /// Get the atomic weight (number of nucleons) in a nucleus/ion /// @note Ion numbers are +/- 10LZZZAAAI. inline int nuclA(int pid) { // A proton can also be a Hydrogen nucleus, and we might as well also allow single neutrons if (abs(pid) == 2212 || abs(pid) == 2112) { return 1; } if (isNucleus(pid)) return (abs(pid)/10) % 1000; return 0; } /// Alias for nuclA /// @deprecated Use nuclA inline int A(int pid) { return nuclA(pid); } /// If this is a nucleus (ion), get nLambda /// @note Ion numbers are +/- 10LZZZAAAI. inline int nuclNlambda(int pid) { // a proton can also be a Hydrogen nucleus if (abs(pid) == 2212) { return 0; } if (isNucleus(pid)) return _digit(n8,pid); return 0; } /// Alias for nuclNlambda /// @deprecated Use nuclNlambda inline int lambda(int pid) { return nuclNlambda(pid); } //@} /// @name Quark composite functions //@{ /// Is this a pomeron, odderon, or generic reggeon? inline bool isReggeon(int pid) { return pid == 110 || pid == 990 || pid == 9990; } /// Check to see if this is a valid meson inline bool isMeson(int pid) { if (_extraBits(pid) > 0) return false; const int aid = abs(pid); if (aid == 130 || aid == 310 || aid == 210) return true; //< special cases for kaons if (aid <= 100) return false; if (_digit(nq1,pid) != 0) return false; if (_digit(nq2,pid) == 0) return false; if (_digit(nq3,pid) == 0) return false; if (_digit(nq2,pid) < _digit(nq3,pid)) return false; // EvtGen uses some odd numbers /// @todo Remove special-casing for EvtGen if (aid == 150 || aid == 350 || aid == 510 || aid == 530) return true; // Pomeron, Reggeon, etc. if (isReggeon(pid)) return false; //true; //< WTF? 
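      // Worked example of the digit pattern tested below (illustrative): a K+ (321)
      // has nq1=0, nq2=3, nq3=2, nj=1, so it reaches this final check and is accepted,
      // while -111 (a "negative pi0") has nq2 == nq3 with pid < 0 and is rejected as
      // an ill-formed antiparticle code.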
// Check for illegal antiparticles if (_digit(nj,pid) > 0 && _digit(nq3,pid) > 0 && _digit(nq2,pid) > 0 && _digit(nq1,pid) == 0) { return !(_digit(nq3,pid) == _digit(nq2,pid) && pid < 0); } return false; } /// Check to see if this is a valid baryon inline bool isBaryon(int pid) { if (_extraBits(pid) > 0) return false; if (abs(pid) <= 100) return false; if (_fundamentalID(pid) <= 100 && _fundamentalID(pid) > 0) return false; if (abs(pid) == 2110 || abs(pid) == 2210) return true; ///< @todo Why this special case with nJ = 0? What are these? Not listed in RPP MC doc... if (_digit(nj,pid) == 0) return false; if (_digit(nq1,pid) == 0 || _digit(nq2,pid) == 0 || _digit(nq3,pid) == 0) return false; return true; /// @todo This is more correct by the definition, but the PDG's entries 1212, 1214, 1216, 1218 and 2122, 2124, 2126, 2128 come out as invalid // if ((_digit(nq1,pid) >= _digit(nq2,pid) && _digit(nq2,pid) >= _digit(nq3,pid)) || // (_digit(nq1,pid) > _digit(nq3,pid) && _digit(nq3,pid) > _digit(nq2,pid)) || //< case 6b for lighter quarks in J=1 // (_digit(nq3,pid) > _digit(nq1,pid) && _digit(nq1,pid) > _digit(nq2,pid))) //< case 6e for extra states in excited multiplets // return true; // return false; } // Check to see if this is a valid diquark inline bool isDiquark(int pid) { if (_extraBits(pid) > 0) return false; if (abs(pid) <= 100) return false; if (_fundamentalID(pid) <= 100 && _fundamentalID(pid) > 0) return false; if (_digit(nq1,pid) == 0) return false; if (_digit(nq2,pid) == 0) return false; if (_digit(nq3,pid) != 0) return false; if (_digit(nq1,pid) < _digit(nq2,pid)) return false; if (_digit(nj,pid) > 0 && _digit(nq3,pid) == 0 && _digit(nq2,pid) > 0 && _digit(nq1,pid) > 0) return true; // diquark signature // EvtGen uses the diquarks for quark pairs, so, for instance, 5501 is a valid "diquark" for EvtGen // if (_digit(nj) == 1 && _digit(nq2) == _digit(nq1)) { // illegal // return false; // } else { // return true; // } return false; } /// @deprecated Use the nicer capitalisation isDiquark(pid) inline bool isDiQuark(int pid) { return isDiquark(pid); } /// Check to see if this is a valid pentaquark inline bool isPentaquark(int pid) { // a pentaquark is of the form 9abcdej, // where j is the spin and a, b, c, d, and e are quarks if (_extraBits(pid) > 0) return false; if (_digit(n,pid) != 9) return false; if (_digit(nr,pid) == 9 || _digit(nr,pid) == 0) return false; if (_digit(nj,pid) == 9 || _digit(nl,pid) == 0) return false; if (_digit(nq1,pid) == 0) return false; if (_digit(nq2,pid) == 0) return false; if (_digit(nq3,pid) == 0) return false; if (_digit(nj,pid) == 0) return false; // check ordering if (_digit(nq2,pid) > _digit(nq1,pid)) return false; if (_digit(nq1,pid) > _digit(nl,pid)) return false; if (_digit(nl,pid) > _digit(nr,pid)) return false; return true; } /// Is this a valid hadron ID? inline bool isHadron(int pid) { if (_extraBits(pid) > 0) return false; if (isMeson(pid)) return true; if (isBaryon(pid)) return true; if (isPentaquark(pid)) return true; return false; } //@} /// @name More general particle class identification functions //@{ /// Is this a valid lepton ID? inline bool isLepton(int pid) { if (_extraBits(pid) > 0) return false; if (_fundamentalID(pid) >= 11 && _fundamentalID(pid) <= 18) return true; return false; } /// Is this a fundamental SUSY particle? 
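    /// Illustrative examples (a sketch, using standard PDG codes):
    /// @code
    /// PID::isSUSY(1000021);  // true:  gluino (n=1, SM base ID 21)
    /// PID::isSUSY(2000011);  // true:  "right" selectron (n=2, SM base ID 11)
    /// PID::isSUSY(21);       // false: an ordinary gluon
    /// @endcode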
inline bool isSUSY(int pid) { // Fundamental SUSY particles have n = 1 or 2 if (_extraBits(pid) > 0) return false; if (_digit(n,pid) != 1 && _digit(n,pid) != 2) return false; if (_digit(nr,pid) != 0) return false; // Check fundamental part for SM PID on which it is based if (_fundamentalID(pid) == 0) return false; return true; } /// Is this an R-hadron? inline bool isRhadron(int pid) { // An R-hadron is of the form 10abcdj, // where j is the spin and a, b, c, and d are quarks or gluons if (_extraBits(pid) > 0) return false; if (_digit(n,pid) != 1) return false; if (_digit(nr,pid) != 0) return false; // Make sure this isn't a SUSY particle if (isSUSY(pid)) return false; // All R-hadrons have at least 3 core digits if (_digit(nq2,pid) == 0) return false; if (_digit(nq3,pid) == 0) return false; if (_digit(nj,pid) == 0) return false; return true; } inline bool isRHadron(int pid) { return isRhadron(pid); } /// Is this a technicolor particle? inline bool isTechnicolor(int pid) { if (_extraBits(pid) > 0) return false; return _digit(n,pid) == 3; } /// Is this an excited (composite) quark or lepton? inline bool isExcited(int pid) { if (_extraBits(pid) > 0) return false; return _digit(n,pid) == 4; } /// Is this a Kaluza-Klein excitation? inline bool isKK(int pid) { if (_extraBits(pid) > 0) return false; const int ndigit = _digit(n,pid); return ndigit == 5 || ndigit == 6; } /// Is this a graviton? inline bool isGraviton(int pid) { return pid == 39; } /// Is this a BSM particle (including graviton)? inline bool isBSM(int pid) { return isSUSY(pid) || isRhadron(pid) || isTechnicolor(pid) || isExcited(pid) || isKK(pid) || isGraviton(pid); } /// Check to see if this is a valid PID (i.e. matches any known scheme) inline bool _isValid(int pid) { // Starting with 99 means anything goes (but nothing is known) if (_digit(n,pid) == 9 && _digit(nr,pid) == 9) return true; // Check that extra bits are only used for nuclei if (_extraBits(pid) > 0) return isNucleus(pid); // Check that it fits into a standard non-nucleus convention if (isBSM(pid)) return true; if (isHadron(pid)) return true; if (_digit(n,pid) == 9 && _digit(nr,pid) == 0) return false; // could only have been a tentative hadron, but !isHadron if (isDiquark(pid)) return true; if (isReggeon(pid)) return true; // // Quark digit orderings required by the standard // if (_digit(nq1,pid) != 0 && _digit(nq1,pid) < _digit(nq2,pid)) return false; // if (_digit(nq2,pid) != 0 && _digit(nq2,pid) < _digit(nq3,pid)) return false; // Final check on fundamental ID return (_fundamentalID(pid) > 0); } inline bool isValid(int pid) { return _isValid(pid); } //@} /// @name Parton content functions //@{ inline bool _hasQ(int pid, int q) { if (abs(pid) == q) return true; //< trivial case! if (!_isValid(pid)) return false; if (_extraBits(pid) > 0) return false; if (_fundamentalID(pid) > 0) return false; return _digit(nq3,pid) == q || _digit(nq2,pid) == q || _digit(nq1,pid) == q; } /// Does this particle contain a down quark? inline bool hasDown(int pid) { return _hasQ(pid, 1); } /// Does this particle contain an up quark? inline bool hasUp(int pid) { return _hasQ(pid, 2); } /// Does this particle contain a strange quark? inline bool hasStrange(int pid) { return _hasQ(pid, 3); } /// Does this particle contain a charm quark? inline bool hasCharm(int pid) { return _hasQ(pid, 4); } /// Does this particle contain a bottom quark? inline bool hasBottom(int pid) { return _hasQ(pid, 5); } /// Does this particle contain a top quark? 
inline bool hasTop(int pid) { return _hasQ(pid, 6); } //@} /// @name Angular momentum functions //@{ /// jSpin returns 2J+1, where J is the total spin inline int jSpin(int pid) { const int fund = _fundamentalID(pid); if (fund > 0) { // some of these are known if (fund > 0 && fund < 7) return 2; if (fund == 9) return 3; if (fund > 10 && fund < 17) return 2; if (fund > 20 && fund < 25) return 3; return 0; } else if (_extraBits(pid) > 0) { return 0; } return abs(pid) % 10; } /// sSpin returns 2S+1, where S is the spin inline int sSpin(int pid) { // Handle invalid cases first if (!isMeson(pid)) return 0; if (_digit(n,pid) == 9 && _digit(nr,pid) == 0) return 0; // tentative ID // Special generic DM particles with defined spins const int fund = _fundamentalID(pid); if (fund == 51 || fund == 54) return 1; if (fund == 52) return 2; if (fund == 53 || fund == 55) return 3; // Calculate from nl and nj digits const int inl = _digit(nl,pid); const int js = _digit(nj,pid); if (inl == 0 && js >= 3) return 1; else if (inl == 0 && js == 1) return 0; else if (inl == 1 && js >= 3) return 0; else if (inl == 2 && js >= 3) return 1; else if (inl == 1 && js == 1) return 1; else if (inl == 3 && js >= 3) return 1; // Default to zero return 0; } /// lSpin returns 2L+1, where L is the orbital angular momentum inline int lSpin(int pid) { // Handle invalid cases first if (!isMeson(pid)) return 0; if (_digit(n,pid) == 9 && _digit(nr,pid) == 0) return 0; // tentative ID // Calculate from nl and nj digits const int inl = _digit(nl,pid); const int js = _digit(nj,pid); if (inl == 0 && js == 3) return 0; else if (inl == 0 && js == 5) return 1; else if (inl == 0 && js == 7) return 2; else if (inl == 0 && js == 9) return 3; else if (inl == 0 && js == 1) return 0; else if (inl == 1 && js == 3) return 1; else if (inl == 1 && js == 5) return 2; else if (inl == 1 && js == 7) return 3; else if (inl == 1 && js == 9) return 4; else if (inl == 2 && js == 3) return 1; else if (inl == 2 && js == 5) return 2; else if (inl == 2 && js == 7) return 3; else if (inl == 2 && js == 9) return 4; else if (inl == 1 && js == 1) return 1; else if (inl == 3 && js == 3) return 2; else if (inl == 3 && js == 5) return 3; else if (inl == 3 && js == 7) return 4; else if (inl == 3 && js == 9) return 5; // Default to zero return 0; } //@} /// @name Charge functions //@{ /// Three times the EM charge (as integer) inline int charge3(int pid) { static int ch100[100] = { -1, 2,-1, 2,-1, 2,-1, 2, 0, 0, -3, 0,-3, 0,-3, 0,-3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; const unsigned short q1 = _digit(nq1,pid); const unsigned short q2 = _digit(nq2,pid); const unsigned short q3 = _digit(nq3,pid); const int ida = abs(pid); const int sid = _fundamentalID(pid); int charge = 0; if (ida == 0 || _extraBits(pid) > 0) {// ion or illegal return 0; } else if (sid > 0 && sid <= 100) {// use table if (ida == 1000017 || ida == 1000018 || ida == 1000034) charge = 0; else if (ida > 1000050 && ida <= 1000060) charge = 0; // ? 
else if (ida > 50 && ida <= 60) charge = 0; // Generic DM else if (ida == 5100061 || ida == 5100062) charge = 6; else charge = ch100[sid-1]; } else if (_digit(nj,pid) == 0) {// KL, Ks, or undefined return 0; } else if (isMeson(pid)) {// mesons if (q2 == 3 || q2 == 5) { charge = ch100[q3-1] - ch100[q2-1]; } else { charge = ch100[q2-1] - ch100[q3-1]; } } else if (isDiQuark(pid)) {// diquarks charge = ch100[q2-1] + ch100[q1-1]; } else if (isBaryon(pid)) {// baryons charge = ch100[q3-1] + ch100[q2-1] + ch100[q1-1]; } else {// unknown return 0; } if (pid < 0) charge *= -1; return charge; } - /// Alias for charge3 - /// @deprecated Prefer charge3 - inline int threeCharge(int pid) { return charge3(pid); } - /// Return the absolute value of 3 times the EM charge inline int abscharge3(int pid) { return std::abs(charge3(pid)); } /// Return the EM charge (as floating point) inline double charge(int pid) { return charge3(pid)/3.0; } /// Return the absolute value of the EM charge (as floating point) inline double abscharge(int pid) { return abscharge3(pid)/3.0; } //@} /// @name General PID-based classifier functions //@{ /// Determine if the particle is electrically charged inline bool isCharged(int pid) { return charge3(pid) != 0; } /// Determine if the particle is electrically neutral inline bool isNeutral(int pid) { return charge3(pid) == 0; } //@} /// @name Fundamental particles //@{ /// Determine if the PID is that of a quark inline bool isQuark(int pid) { return in_closed_range(abs(pid), 1, 6); } /// Determine if the PID is that of a gluon inline bool isGluon(int pid) { return pid == GLUON; } /// Determine if the PID is that of a parton (quark or gluon) inline bool isParton(int pid) { return isGluon(pid) || isQuark(pid); } /// Determine if the PID is that of a photon inline bool isPhoton(int pid) { return pid == PHOTON; } /// Determine if the PID is that of an electron or positron inline bool isElectron(int pid) { return abs(pid) == ELECTRON; } /// Determine if the PID is that of an muon or antimuon inline bool isMuon(int pid) { return abs(pid) == MUON; } /// Determine if the PID is that of an tau or antitau inline bool isTau(int pid) { return abs(pid) == TAU; } /// Determine if the PID is that of a charged lepton inline bool isChargedLepton(int pid) { const long apid = abs(pid); return apid == 11 || apid == 13 || apid == 15; } // Alias inline bool isChLepton(int pid) { return isChargedLepton(pid); } /// Determine if the PID is that of a neutrino inline bool isNeutrino(int pid) { const long apid = abs(pid); return apid == 12 || apid == 14 || apid == 16; } /// Determine if the PID is that of a W+ inline bool isWplus(int pid) { return pid == WPLUSBOSON; } /// Determine if the PID is that of a W- inline bool isWminus(int pid) { return pid == WMINUSBOSON; } /// Determine if the PID is that of a W+- inline bool isW(int pid) { return abs(pid) == WPLUSBOSON; } /// Determine if the PID is that of a Z0 inline bool isZ(int pid) { return pid == Z0BOSON; } /// Determine if the PID is that of an SM/lightest SUSY Higgs inline bool isHiggs(int pid) { return pid == HIGGSBOSON || pid == 26; //< @todo Check on 26 still needed? (used in HERWIG SUSY, for example) } /// @todo isSUSYHiggs? 
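    // Illustrative usage sketch of the charge and classifier helpers above
    // (standard PDG codes assumed; not tied to any particular analysis):
    //
    //   PID::charge3(211);        // 3    -> charge(211) == +1.0 for a pi+
    //   PID::charge(2212);        // 1.0  for a proton
    //   PID::charge3(11);         // -3   -> charge(11)  == -1.0 for an electron
    //   PID::isChargedLepton(13); // true, and PID::isCharged(13) is also true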
// /// Determine if the PID is that of a d/dbar // inline bool isDown(int pid) { return abs(pid) == DQUARK; } // /// Determine if the PID is that of a u/ubar // inline bool isUp(int pid) { return abs(pid) == UQUARK; } /// Determine if the PID is that of a s/sbar inline bool isStrange(int pid) { return abs(pid) == SQUARK; } /// Determine if the PID is that of a c/cbar inline bool isCharm(int pid) { return abs(pid) == CQUARK; } /// Determine if the PID is that of a b/bbar inline bool isBottom(int pid) { return abs(pid) == BQUARK; } /// Determine if the PID is that of a t/tbar inline bool isTop(int pid) { return abs(pid) == TQUARK; } //@} /// @name Hadron and parton flavour classification //@{ /// Determine if the particle is a heavy flavour hadron or parton inline bool isHeavyFlavour(int pid) { return hasCharm(pid) || hasBottom(pid) || hasTop(pid); } // /// Determine if the particle is a light-flavour flavour hadron or parton // inline bool isLightFlavour(int pid) { // return !isHeavyFlavour(); // } /// Determine if the PID is that of a heavy parton (c,b,t) inline bool isHeavyParton(int pid) { return isParton(pid) && isHeavyFlavour(pid); } /// Determine if the PID is that of a light parton (u,d,s) inline bool isLightParton(int pid) { return isParton(pid) && !isHeavyFlavour(pid); } /// Determine if the PID is that of a heavy flavour (b or c) meson inline bool isHeavyMeson(int pid) { return isMeson(pid) && isHeavyFlavour(pid); } /// Determine if the PID is that of a heavy flavour (b or c) baryon inline bool isHeavyBaryon(int pid) { return isBaryon(pid) && isHeavyFlavour(pid); } /// Determine if the PID is that of a heavy flavour (b or c) hadron inline bool isHeavyHadron(int pid) { return isHadron(pid) && isHeavyFlavour(pid); } /// Determine if the PID is that of a light flavour (not b or c) meson inline bool isLightMeson(int pid) { return isMeson(pid) && !isHeavyFlavour(pid); } /// Determine if the PID is that of a light flavour (not b or c) baryon inline bool isLightBaryon(int pid) { return isBaryon(pid) && !isHeavyFlavour(pid); } /// Determine if the PID is that of a light flavour (not b or c) hadron inline bool isLightHadron(int pid) { return isHadron(pid) && !isHeavyFlavour(pid); } /// Determine if the PID is that of a b-meson. inline bool isBottomMeson(int pid) { return hasBottom(pid) && isMeson(pid); } /// Determine if the PID is that of a b-baryon. inline bool isBottomBaryon(int pid) { return hasBottom(pid) && isBaryon(pid); } /// Determine if the PID is that of a b-hadron. inline bool isBottomHadron(int pid) { return hasBottom(pid) && isHadron(pid); } /// @brief Determine if the PID is that of a c-meson. /// /// @note Specifically, the _heaviest_ quark is a c: a B_c is a b-meson and NOT a c-meson. /// Charmonia (closed charm) are counted as c-mesons here. inline bool isCharmMeson(int pid) { return isMeson(pid) && hasCharm(pid) && !hasBottom(pid); } /// @brief Determine if the PID is that of a c-baryon. /// /// @note Specifically, the _heaviest_ quark is a c: a baryon containing a b & c /// is a b-baryon and NOT a c-baryon. To test for the simpler case, just use /// a combination of hasCharm() and isBaryon(). inline bool isCharmBaryon(int pid) { return isBaryon(pid) && hasCharm(pid) && !hasBottom(pid); } /// Determine if the PID is that of a c-hadron. /// /// @note Specifically, the _heaviest_ quark is a c: a baryon containing a b & c /// is a b-baryon and NOT a c-baryon. To test for the simpler case, just use /// a combination of hasCharm() and isBaryon(). 
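    /// Illustrative examples (a sketch, using standard PDG codes):
    /// @code
    /// PID::isCharmHadron(421);  // true:  D0 (c ubar)
    /// PID::isCharmHadron(443);  // true:  J/psi -- closed charm counts
    /// PID::isCharmHadron(541);  // false: B_c+ contains a b, so it is a b-hadron
    /// @endcode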
inline bool isCharmHadron(int pid) { return isHadron(pid) && hasCharm(pid) && !hasBottom(pid); } /// Determine if the PID is that of a strange meson /// /// @note Specifically, the _heaviest_ quark is an s: if it also contains /// either charm or bottom, it is not considered to be a strange hadron. inline bool isStrangeMeson(int pid) { return isMeson(pid) && hasStrange(pid) && !(hasBottom(pid) || hasCharm(pid)); } /// Determine if the PID is that of a strange baryon /// /// @note Specifically, the _heaviest_ quark is an s: if it also contains /// either charm or bottom, it is not considered to be a strange hadron. inline bool isStrangeBaryon(int pid) { return isBaryon(pid) && hasStrange(pid) && !(hasBottom(pid) || hasCharm(pid)); } /// Determine if the PID is that of a strange hadron /// /// @note Specifically, the _heaviest_ quark is an s: if it also contains /// either charm or bottom, it is not considered to be a strange hadron. inline bool isStrangeHadron(int pid) { return isHadron(pid) && hasStrange(pid) && !(hasBottom(pid) || hasCharm(pid)); } //@} /// @name Interaction classifiers //@{ /// Determine if the PID is that of a strongly interacting particle inline bool isStrongInteracting(int pid) { return isParton(pid) || isHadron(pid); } /// Determine if the PID is that of a electromagnetically interacting particle inline bool isEMInteracting(int pid) { return isCharged(pid) || isPhoton(pid); } /// Determine if the PID is that of a weakly interacting particle /// /// @note Photons are considered weak-interacting, as are all hadrons and /// leptons (we can't distinguish between L and R fermions at physical particle level). inline bool isWeakInteracting(int pid) { return !isGluon(pid) && !isGraviton(pid); } //@} /// @name Other classifiers //@{ /// Determine if the PID is in the generator-specific range inline bool isGenSpecific(int pid) { return in_range(pid, 80, 101); } /// Determine if the PID is that of an EW scale resonance /// /// @todo Also include SUSY, technicolor, etc. etc.? Maybe via a isStandardModel(pid) function, but there are stable BSM particles (in principle) inline bool isResonance(int pid) { return isW(pid) || isZ(pid) || isHiggs(pid) || isTop(pid); } /// Check the PID for usability in transport codes like Geant4 /// /// @todo Should exclude neutrinos/LSP, since the ATLAS G4 interface deletes them immediately? inline bool isTransportable(int pid) { // return !isResonance(pid) && !isParton(pid) && !isGenSpecific(pid); return isPhoton(pid) || isHadron(pid) || isLepton(pid); } //@} /// @name Particle pair classifiers /// @todo Make versions that work on PdgIdPair? 
    //@{

    inline bool isSameSign(PdgId a, PdgId b) { return a*b >= 0; }
    inline bool isOppSign(PdgId a, PdgId b) { return !isSameSign(a, b); }
    inline bool isSameFlav(PdgId a, PdgId b) { return abs(a) == abs(b); }
    inline bool isOppFlav(PdgId a, PdgId b) { return !isSameFlav(a, b); }

    inline bool isOSSF(PdgId a, PdgId b) { return isOppSign(a, b) && isSameFlav(a, b); }
    inline bool isSSSF(PdgId a, PdgId b) { return isSameSign(a, b) && isSameFlav(a, b); }
    inline bool isOSOF(PdgId a, PdgId b) { return isOppSign(a, b) && isOppFlav(a, b); }
    inline bool isSSOF(PdgId a, PdgId b) { return isSameSign(a, b) && isOppFlav(a, b); }

    //@}

  }
}

#endif
diff --git a/include/Rivet/Tools/ParticleUtils.hh b/include/Rivet/Tools/ParticleUtils.hh
--- a/include/Rivet/Tools/ParticleUtils.hh
+++ b/include/Rivet/Tools/ParticleUtils.hh
@@ -1,773 +1,768 @@
#ifndef RIVET_PARTICLEUTILS_HH
#define RIVET_PARTICLEUTILS_HH

#include "Rivet/Particle.hh"
#include "Rivet/Tools/ParticleBaseUtils.hh"
#include "Rivet/Tools/ParticleIdUtils.hh"

// Macros to map Rivet::Particle functions to PID:: functions of the same name
#define PARTICLE_TO_PID_BOOLFN(fname) inline bool fname (const Particle& p) { return PID:: fname (p.pid()); }
#define PARTICLE_TO_PID_INTFN(fname) inline int fname (const Particle& p) { return PID:: fname (p.pid()); }
#define PARTICLE_TO_PID_DBLFN(fname) inline double fname (const Particle& p) { return PID:: fname (p.pid()); }

namespace Rivet {

  /// @name Particle classifier functions
  //@{

  /// Unbound function access to PID code
  inline int pid(const Particle& p) { return p.pid(); }

  /// Unbound function access to abs PID code
  inline int abspid(const Particle& p) { return p.abspid(); }

  /// Is this particle species charged?
  PARTICLE_TO_PID_BOOLFN(isCharged)

  /// Is this particle species neutral?
  PARTICLE_TO_PID_BOOLFN(isNeutral)

  /// Is this a neutrino?
PARTICLE_TO_PID_BOOLFN(isNeutrino) /// Determine if the PID is that of a charged lepton PARTICLE_TO_PID_BOOLFN(isChargedLepton) PARTICLE_TO_PID_BOOLFN(isChLepton) /// Determine if the PID is that of a lepton (charged or neutral) PARTICLE_TO_PID_BOOLFN(isLepton) /// Determine if the PID is that of a photon PARTICLE_TO_PID_BOOLFN(isPhoton) /// Determine if the PID is that of an electron or positron PARTICLE_TO_PID_BOOLFN(isElectron) /// Determine if the PID is that of an muon or antimuon PARTICLE_TO_PID_BOOLFN(isMuon) /// Determine if the PID is that of an tau or antitau PARTICLE_TO_PID_BOOLFN(isTau) /// Determine if the PID is that of a hadron PARTICLE_TO_PID_BOOLFN(isHadron) /// Determine if the PID is that of a meson PARTICLE_TO_PID_BOOLFN(isMeson) /// Determine if the PID is that of a baryon PARTICLE_TO_PID_BOOLFN(isBaryon) /// Determine if the PID is that of a quark PARTICLE_TO_PID_BOOLFN(isQuark) /// Determine if the PID is that of a parton (quark or gluon) PARTICLE_TO_PID_BOOLFN(isParton) /// Determine if the PID is that of a W+ PARTICLE_TO_PID_BOOLFN(isWplus) /// Determine if the PID is that of a W- PARTICLE_TO_PID_BOOLFN(isWminus) /// Determine if the PID is that of a W+- PARTICLE_TO_PID_BOOLFN(isW) /// Determine if the PID is that of a Z0 PARTICLE_TO_PID_BOOLFN(isZ) /// Determine if the PID is that of an SM/lightest SUSY Higgs PARTICLE_TO_PID_BOOLFN(isHiggs) /// Determine if the PID is that of an s/sbar PARTICLE_TO_PID_BOOLFN(isStrange) /// Determine if the PID is that of a c/cbar PARTICLE_TO_PID_BOOLFN(isCharm) /// Determine if the PID is that of a b/bbar PARTICLE_TO_PID_BOOLFN(isBottom) /// Determine if the PID is that of a t/tbar PARTICLE_TO_PID_BOOLFN(isTop) /// Determine if the particle is a heavy flavour hadron or parton PARTICLE_TO_PID_BOOLFN(isHeavyFlavour) /// Determine if the PID is that of a heavy parton (c,b,t) PARTICLE_TO_PID_BOOLFN(isHeavyParton) /// Determine if the PID is that of a light parton (u,d,s) PARTICLE_TO_PID_BOOLFN(isLightParton) /// Determine if the PID is that of a heavy flavour (b or c) meson PARTICLE_TO_PID_BOOLFN(isHeavyMeson) /// Determine if the PID is that of a heavy flavour (b or c) baryon PARTICLE_TO_PID_BOOLFN(isHeavyBaryon) /// Determine if the PID is that of a heavy flavour (b or c) hadron PARTICLE_TO_PID_BOOLFN(isHeavyHadron) /// Determine if the PID is that of a light flavour (not b or c) meson PARTICLE_TO_PID_BOOLFN(isLightMeson) /// Determine if the PID is that of a light flavour (not b or c) baryon PARTICLE_TO_PID_BOOLFN(isLightBaryon) /// Determine if the PID is that of a light flavour (not b or c) hadron PARTICLE_TO_PID_BOOLFN(isLightHadron) /// Determine if the PID is that of a b-meson. PARTICLE_TO_PID_BOOLFN(isBottomMeson) /// Determine if the PID is that of a b-baryon. PARTICLE_TO_PID_BOOLFN(isBottomBaryon) /// Determine if the PID is that of a b-hadron. PARTICLE_TO_PID_BOOLFN(isBottomHadron) /// @brief Determine if the PID is that of a c-meson. /// /// Specifically, the _heaviest_ quark is a c: a B_c is a b-meson and NOT a c-meson. /// Charmonia (closed charm) are counted as c-mesons here. PARTICLE_TO_PID_BOOLFN(isCharmMeson) /// @brief Determine if the PID is that of a c-baryon. /// /// Specifically, the _heaviest_ quark is a c: a baryon containing a b & c /// is a b-baryon and NOT a c-baryon. To test for the simpler case, just use /// a combination of hasCharm() and isBaryon(). PARTICLE_TO_PID_BOOLFN(isCharmBaryon) /// Determine if the PID is that of a c-hadron. 
PARTICLE_TO_PID_BOOLFN(isCharmHadron) // /// Determine if the PID is that of a strange meson // PARTICLE_TO_PID_BOOLFN(isStrangeMeson) // /// Determine if the PID is that of a strange baryon // PARTICLE_TO_PID_BOOLFN(isStrangeBaryon) // /// Determine if the PID is that of a strange hadron // PARTICLE_TO_PID_BOOLFN(isStrangeHadron) /// Is this a pomeron, odderon, or generic reggeon? PARTICLE_TO_PID_BOOLFN(isReggeon) /// Determine if the PID is that of a diquark (used in hadronization models) PARTICLE_TO_PID_BOOLFN(isDiquark) /// Determine if the PID is that of a pentaquark (hypothetical hadron) PARTICLE_TO_PID_BOOLFN(isPentaquark) /// Is this a fundamental SUSY particle? PARTICLE_TO_PID_BOOLFN(isSUSY) /// Is this an R-hadron? PARTICLE_TO_PID_BOOLFN(isRhadron) /// Is this a technicolor particle? PARTICLE_TO_PID_BOOLFN(isTechnicolor) /// Is this an excited (composite) quark or lepton? PARTICLE_TO_PID_BOOLFN(isExcited) /// Is this a Kaluza-Klein excitation? PARTICLE_TO_PID_BOOLFN(isKK) /// Is this a graviton? PARTICLE_TO_PID_BOOLFN(isGraviton) /// Is this a BSM particle (including graviton)? PARTICLE_TO_PID_BOOLFN(isBSM) /// Determine if the PID is in the generator-specific range PARTICLE_TO_PID_BOOLFN(isGenSpecific) /// Determine if the PID is that of an EW scale resonance PARTICLE_TO_PID_BOOLFN(isResonance) /// Check the PID for usability in transport codes like Geant4 PARTICLE_TO_PID_BOOLFN(isTransportable) /// Does this particle contain an up quark? PARTICLE_TO_PID_BOOLFN(hasUp) /// Does this particle contain a down quark? PARTICLE_TO_PID_BOOLFN(hasDown) /// Does this particle contain a strange quark? PARTICLE_TO_PID_BOOLFN(hasStrange) /// Does this particle contain a charm quark? PARTICLE_TO_PID_BOOLFN(hasCharm) /// Does this particle contain a bottom quark? PARTICLE_TO_PID_BOOLFN(hasBottom) /// Does this particle contain a top quark? PARTICLE_TO_PID_BOOLFN(hasTop) /// jSpin returns 2J+1, where J is the total spin PARTICLE_TO_PID_INTFN(jSpin) /// sSpin returns 2S+1, where S is the spin PARTICLE_TO_PID_INTFN(sSpin) /// lSpin returns 2L+1, where L is the orbital angular momentum PARTICLE_TO_PID_INTFN(lSpin) /// Return the charge PARTICLE_TO_PID_DBLFN(charge) /// Return 3 times the charge (3 x quark charge is an int) PARTICLE_TO_PID_INTFN(charge3) /// Return the absolute charge PARTICLE_TO_PID_DBLFN(abscharge) /// Return 3 times the abs charge (3 x quark charge is an int) PARTICLE_TO_PID_INTFN(abscharge3) - /// Alias for charge3 - /// @deprecated Use charge3 - PARTICLE_TO_PID_INTFN(threeCharge) - - /// Get the atomic number (number of protons) in a nucleus/ion PARTICLE_TO_PID_INTFN(nuclZ) /// Get the atomic weight (number of nucleons) in a nucleus/ion PARTICLE_TO_PID_INTFN(nuclA) /// If this is a nucleus (ion), get nLambda PARTICLE_TO_PID_INTFN(nuclNlambda) //@} /// @name Particle pair classifiers /// @todo Make versions that work on ParticlePair? 
//@{ inline bool isSameSign(const Particle& a, const Particle& b) { return PID::isSameSign(a.pid(), b.pid()); } inline bool isOppSign(const Particle& a, const Particle& b) { return PID::isOppSign(a.pid(), b.pid()); } inline bool isSameFlav(const Particle& a, const Particle& b) { return PID::isSameFlav(a.pid(), b.pid()); } inline bool isOppFlav(const Particle& a, const Particle& b) { return PID::isOppFlav(a.pid(), b.pid()); } inline bool isOSSF(const Particle& a, const Particle& b) { return PID::isOSSF(a.pid(), b.pid()); } inline bool isSSSF(const Particle& a, const Particle& b) { return PID::isSSSF(a.pid(), b.pid()); } inline bool isOSOF(const Particle& a, const Particle& b) { return PID::isOSOF(a.pid(), b.pid()); } inline bool isSSOF(const Particle& a, const Particle& b) { return PID::isSSOF(a.pid(), b.pid()); } //@} /// @name Particle charge/sign comparison functions //@{ /// @brief Return true if Particles @a a and @a b have the opposite charge sign /// @note Two neutrals returns false inline bool oppSign(const Particle& a, const Particle& b) { return sign(a.charge3()) == -sign(b.charge3()) && sign(a.charge3()) != ZERO; } /// Return true if Particles @a a and @a b have the same charge sign /// @note Two neutrals returns true inline bool sameSign(const Particle& a, const Particle& b) { return sign(a.charge3()) == sign(b.charge3()); } /// Return true if Particles @a a and @a b have the exactly opposite charge /// @note Two neutrals returns false inline bool oppCharge(const Particle& a, const Particle& b) { return a.charge3() == -b.charge3() && a.charge3() != 0; } /// Return true if Particles @a a and @a b have the same charge (including neutral) /// @note Two neutrals returns true inline bool sameCharge(const Particle& a, const Particle& b) { return a.charge3() == b.charge3(); } /// Return true if Particles @a a and @a b have a different (not necessarily opposite) charge inline bool diffCharge(const Particle& a, const Particle& b) { return a.charge3() != b.charge3(); } //@} ////////////////////////////////////// /// @name Non-PID particle properties, via unbound functions //@{ /// @brief Determine whether a particle is the first in a decay chain to meet the function requirement inline bool isFirstWith(const Particle& p, const ParticleSelector& f) { return p.isFirstWith(f); } /// @brief Determine whether a particle is the first in a decay chain not to meet the function requirement inline bool isFirstWithout(const Particle& p, const ParticleSelector& f) { return p.isFirstWithout(f); } /// @brief Determine whether a particle is the last in a decay chain to meet the function requirement inline bool isLastWith(const Particle& p, const ParticleSelector& f) { return p.isLastWith(f); } /// @brief Determine whether a particle is the last in a decay chain not to meet the function requirement inline bool isLastWithout(const Particle& p, const ParticleSelector& f) { return p.isLastWithout(f); } /// @brief Determine whether a particle has an ancestor which meets the function requirement inline bool hasAncestorWith(const Particle& p, const ParticleSelector& f) { return p.hasAncestorWith(f); } /// @brief Determine whether a particle has an ancestor which doesn't meet the function requirement inline bool hasAncestorWithout(const Particle& p, const ParticleSelector& f) { return p.hasAncestorWithout(f); } /// @brief Determine whether a particle has a parent which meets the function requirement inline bool hasParentWith(const Particle& p, const ParticleSelector& f) { return p.hasParentWith(f); } /// @brief 
Determine whether a particle has a parent which doesn't meet the function requirement inline bool hasParentWithout(const Particle& p, const ParticleSelector& f) { return p.hasParentWithout(f); } /// @brief Determine whether a particle has a child which meets the function requirement inline bool hasChildWith(const Particle& p, const ParticleSelector& f) { return p.hasChildWith(f); } /// @brief Determine whether a particle has a child which doesn't meet the function requirement inline bool hasChildWithout(const Particle& p, const ParticleSelector& f) { return p.hasChildWithout(f); } /// @brief Determine whether a particle has a descendant which meets the function requirement inline bool hasDescendantWith(const Particle& p, const ParticleSelector& f) { return p.hasDescendantWith(f); // return !p.allDescendants(f).empty(); } /// @brief Determine whether a particle has a descendant which doesn't meet the function requirement inline bool hasDescendantWithout(const Particle& p, const ParticleSelector& f) { return p.hasDescendantWithout(f); } /// @brief Determine whether a particle has a stable descendant which meets the function requirement inline bool hasStableDescendantWith(const Particle& p, const ParticleSelector& f) { return p.hasStableDescendantWith(f); } /// @brief Determine whether a particle has a stable descendant which doesn't meet the function requirement inline bool hasStableDescendantWithout(const Particle& p, const ParticleSelector& f) { return p.hasStableDescendantWithout(f); } /// Is this particle potentially visible in a detector? inline bool isVisible(const Particle& p) { return p.isVisible(); } /// @brief Decide if a given particle is direct, via Particle::isDirect() /// /// A "direct" particle is one directly connected to the hard process. It is a /// preferred alias for "prompt", since it has no confusing implications about /// distinguishability by timing information. /// /// The boolean arguments allow a decay lepton to be considered direct if /// its parent was a "real" direct lepton. inline bool isDirect(const Particle& p, bool allow_from_direct_tau=false, bool allow_from_direct_mu=false) { return p.isDirect(allow_from_direct_tau, allow_from_direct_mu); } /// @brief Decide if a given particle is prompt, via Particle::isPrompt() /// /// The boolean arguments allow a decay lepton to be considered prompt if /// its parent was a "real" prompt lepton. 
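  /// Illustrative usage sketch (the container names are hypothetical):
  /// @code
  /// Particles promptLeptons;
  /// for (const Particle& l : allLeptons)
  ///   if (isPrompt(l, true)) promptLeptons += l;  // also accept leptons from prompt tau decays
  /// @endcode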
inline bool isPrompt(const Particle& p, bool allow_from_prompt_tau=false, bool allow_from_prompt_mu=false) { return p.isPrompt(allow_from_prompt_tau, allow_from_prompt_mu); } /// Decide if a given particle is stable, via Particle::isStable() inline bool isStable(const Particle& p) { return p.isStable(); } /// Decide if a given particle decays hadronically inline bool hasHadronicDecay(const Particle& p) { if (p.isStable()) return false; if (p.hasChildWith(isHadron)) return true; return false; } /// Decide if a given particle decays leptonically (decays, and no hadrons) inline bool hasLeptonicDecay(const Particle& p) { if (p.isStable()) return false; if (p.hasChildWith(isHadron)) return false; return true; } /// Check whether a given PID is found in the particle's ancestor list /// @deprecated Prefer hasAncestorWith inline bool hasAncestor(const Particle& p, PdgId pid) { return p.hasAncestor(pid); } /// Determine whether the particle is from a b-hadron decay inline bool fromBottom(const Particle& p) { return p.fromBottom(); } /// @brief Determine whether the particle is from a c-hadron decay inline bool fromCharm(const Particle& p) { return p.fromCharm(); } /// @brief Determine whether the particle is from a hadron decay inline bool fromHadron(const Particle& p) { return p.fromHadron(); } /// @brief Determine whether the particle is from a tau decay inline bool fromTau(const Particle& p, bool prompt_taus_only=false) { return p.fromTau(prompt_taus_only); } /// @brief Determine whether the particle is from a prompt tau decay inline bool fromPromptTau(const Particle& p) { return p.fromPromptTau(); } /// @brief Determine whether the particle is from a hadron or tau decay /// @deprecated Too vague: use fromHadron or fromHadronicTau inline bool fromDecay(const Particle& p) { return p.fromDecay(); } //@} /// @name Particle classifier -> bool functors /// /// To be passed to any() or all() e.g. 
any(p.children(), HasPID(PID::MUON)) //@{ /// Base type for Particle -> bool functors struct BoolParticleFunctor { virtual bool operator()(const Particle& p) const = 0; virtual ~BoolParticleFunctor() {} }; /// Functor for and-combination of selector logic struct BoolParticleAND : public BoolParticleFunctor { BoolParticleAND(const std::vector& sels) : selectors(sels) {} BoolParticleAND(const ParticleSelector& a, const ParticleSelector& b) : selectors({a,b}) {} BoolParticleAND(const ParticleSelector& a, const ParticleSelector& b, const ParticleSelector& c) : selectors({a,b,c}) {} bool operator()(const Particle& p) const { for (const ParticleSelector& sel : selectors) if (!sel(p)) return false; return true; } std::vector selectors; }; /// Operator syntactic sugar for AND construction inline BoolParticleAND operator && (const ParticleSelector& a, const ParticleSelector& b) { return BoolParticleAND(a, b); } /// Functor for or-combination of selector logic struct BoolParticleOR : public BoolParticleFunctor { BoolParticleOR(const std::vector& sels) : selectors(sels) {} BoolParticleOR(const ParticleSelector& a, const ParticleSelector& b) : selectors({a,b}) {} BoolParticleOR(const ParticleSelector& a, const ParticleSelector& b, const ParticleSelector& c) : selectors({a,b,c}) {} bool operator()(const Particle& p) const { for (const ParticleSelector& sel : selectors) if (sel(p)) return true; return false; } std::vector selectors; }; /// Operator syntactic sugar for OR construction inline BoolParticleOR operator || (const ParticleSelector& a, const ParticleSelector& b) { return BoolParticleOR(a, b); } /// Functor for inverting selector logic struct BoolParticleNOT : public BoolParticleFunctor { BoolParticleNOT(const ParticleSelector& sel) : selector(sel) {} bool operator()(const Particle& p) const { return !selector(p); } ParticleSelector selector; }; /// Operator syntactic sugar for NOT construction inline BoolParticleNOT operator ! 
(const ParticleSelector& a) { return BoolParticleNOT(a); } /// PID matching functor struct HasPID : public BoolParticleFunctor { HasPID(PdgId pid) : targetpids{pid} { } HasPID(vector pids) : targetpids{pids} { } HasPID(initializer_list pids) : targetpids{pids} { } bool operator()(const Particle& p) const { return contains(targetpids, p.pid()); } vector targetpids; }; using hasPID = HasPID; /// |PID| matching functor struct HasAbsPID : public BoolParticleFunctor { HasAbsPID(PdgId pid) : targetapids{abs(pid)} { } HasAbsPID(vector pids) { for (PdgId pid : pids) targetapids.push_back(abs(pid)); } HasAbsPID(initializer_list pids) { for (PdgId pid : pids) targetapids.push_back(abs(pid)); } bool operator()(const Particle& p) const { return contains(targetapids, p.abspid()); } vector targetapids; }; using hasAbsPID = HasAbsPID; /// Determine whether a particle is the first in a decay chain to meet the cut/function struct FirstParticleWith : public BoolParticleFunctor { FirstParticleWith(const ParticleSelector& f) : fn(f) { } FirstParticleWith(const Cut& c); bool operator()(const Particle& p) const { return isFirstWith(p, fn); } ParticleSelector fn; }; using firstParticleWith = FirstParticleWith; /// Determine whether a particle is the first in a decay chain not to meet the cut/function struct FirstParticleWithout : public BoolParticleFunctor { FirstParticleWithout(const ParticleSelector& f) : fn(f) { } FirstParticleWithout(const Cut& c); bool operator()(const Particle& p) const { return isFirstWithout(p, fn); } ParticleSelector fn; }; using firstParticleWithout = FirstParticleWithout; /// Determine whether a particle is the last in a decay chain to meet the cut/function struct LastParticleWith : public BoolParticleFunctor { template LastParticleWith(const FN& f) : fn(f) { } LastParticleWith(const Cut& c); bool operator()(const Particle& p) const { return isLastWith(p, fn); } std::function fn; }; using lastParticleWith = LastParticleWith; /// Determine whether a particle is the last in a decay chain not to meet the cut/function struct LastParticleWithout : public BoolParticleFunctor { LastParticleWithout(const ParticleSelector& f) : fn(f) { } LastParticleWithout(const Cut& c); bool operator()(const Particle& p) const { return isLastWithout(p, fn); } ParticleSelector fn; }; using lastParticleWithout = LastParticleWithout; /// Determine whether a particle has an ancestor which meets the cut/function struct HasParticleAncestorWith : public BoolParticleFunctor { HasParticleAncestorWith(const ParticleSelector& f) : fn(f) { } HasParticleAncestorWith(const Cut& c); bool operator()(const Particle& p) const { return hasAncestorWith(p, fn); } ParticleSelector fn; }; using hasParticleAncestorWith = HasParticleAncestorWith; /// Determine whether a particle has an ancestor which doesn't meet the cut/function struct HasParticleAncestorWithout : public BoolParticleFunctor { HasParticleAncestorWithout(const ParticleSelector& f) : fn(f) { } HasParticleAncestorWithout(const Cut& c); bool operator()(const Particle& p) const { return hasAncestorWithout(p, fn); } ParticleSelector fn; }; using hasParticleAncestorWithout = HasParticleAncestorWithout; /// Determine whether a particle has an parent which meets the cut/function struct HasParticleParentWith : public BoolParticleFunctor { HasParticleParentWith(const ParticleSelector& f) : fn(f) { } HasParticleParentWith(const Cut& c); bool operator()(const Particle& p) const { return hasParentWith(p, fn); } ParticleSelector fn; }; using hasParticleParentWith = 
HasParticleParentWith; /// Determine whether a particle has an parent which doesn't meet the cut/function struct HasParticleParentWithout : public BoolParticleFunctor { HasParticleParentWithout(const ParticleSelector& f) : fn(f) { } HasParticleParentWithout(const Cut& c); bool operator()(const Particle& p) const { return hasParentWithout(p, fn); } ParticleSelector fn; }; using hasParticleParentWithout = HasParticleParentWithout; /// Determine whether a particle has a child which meets the cut/function struct HasParticleChildWith : public BoolParticleFunctor { HasParticleChildWith(const ParticleSelector& f) : fn(f) { } HasParticleChildWith(const Cut& c); bool operator()(const Particle& p) const { return hasChildWith(p, fn); } ParticleSelector fn; }; using hasParticleChildWith = HasParticleChildWith; /// Determine whether a particle has a child which doesn't meet the cut/function struct HasParticleChildWithout : public BoolParticleFunctor { HasParticleChildWithout(const ParticleSelector& f) : fn(f) { } HasParticleChildWithout(const Cut& c); bool operator()(const Particle& p) const { return hasChildWithout(p, fn); } ParticleSelector fn; }; using hasParticleChildWithout = HasParticleChildWithout; /// Determine whether a particle has a descendant which meets the cut/function struct HasParticleDescendantWith : public BoolParticleFunctor { HasParticleDescendantWith(const ParticleSelector& f) : fn(f) { } HasParticleDescendantWith(const Cut& c); bool operator()(const Particle& p) const { return hasDescendantWith(p, fn); } ParticleSelector fn; }; using hasParticleDescendantWith = HasParticleDescendantWith; /// Determine whether a particle has a descendant which doesn't meet the cut/function struct HasParticleDescendantWithout : public BoolParticleFunctor { HasParticleDescendantWithout(const ParticleSelector& f) : fn(f) { } HasParticleDescendantWithout(const Cut& c); bool operator()(const Particle& p) const { return hasDescendantWithout(p, fn); } ParticleSelector fn; }; using hasParticleDescendantWithout = HasParticleDescendantWithout; //@} /// @name Unbound functions for filtering particles //@{ /// Filter a particle collection in-place to the subset that passes the supplied Cut Particles& ifilter_select(Particles& particles, const Cut& c); /// Alias for ifilter_select /// @deprecated Use ifilter_select inline Particles& ifilterBy(Particles& particles, const Cut& c) { return ifilter_select(particles, c); } /// Filter a particle collection in-place to the subset that passes the supplied Cut inline Particles filter_select(const Particles& particles, const Cut& c) { Particles rtn = particles; return ifilter_select(rtn, c); } /// Alias for ifilter_select /// @deprecated Use filter_select inline Particles filterBy(const Particles& particles, const Cut& c) { return filter_select(particles, c); } /// Filter a particle collection in-place to the subset that passes the supplied Cut inline Particles filter_select(const Particles& particles, const Cut& c, Particles& out) { out = filter_select(particles, c); return out; } /// Alias for ifilter_select /// @deprecated Use filter_select inline Particles filterBy(const Particles& particles, const Cut& c, Particles& out) { return filter_select(particles, c, out); } /// Filter a particle collection in-place to the subset that fails the supplied Cut Particles& ifilter_discard(Particles& particles, const Cut& c); /// Filter a particle collection in-place to the subset that fails the supplied Cut inline Particles filter_discard(const Particles& particles, const Cut& c) 
{ Particles rtn = particles; return ifilter_discard(rtn, c); } /// Filter a particle collection in-place to the subset that fails the supplied Cut inline Particles filter_discard(const Particles& particles, const Cut& c, Particles& out) { out = filter_discard(particles, c); return out; } // inline void ifilterIsolateDeltaR(Particles& particles, const FourMomenta& vecs) { // ifilter_discard(particles, // } // inline Particles filterIsolateDeltaR(const Particles& particles, const FourMomenta& vecs) { // } //@} /// @name Particle pair functions //@{ /// Get the PDG ID codes of a ParticlePair /// @todo Make ParticlePair a custom class instead? inline PdgIdPair pids(const ParticlePair& pp) { return make_pair(pp.first.pid(), pp.second.pid()); } //@} /// @name Operations on collections of Particle /// @note This can't be done on generic collections of ParticleBase -- thanks, C++ :-/ //@{ namespace Kin { inline double sumPt(const Particles& ps) { return sum(ps, pT, 0.0); } inline FourMomentum sumP4(const Particles& ps) { return sum(ps, p4, FourMomentum()); } inline Vector3 sumP3(const Particles& ps) { return sum(ps, p3, Vector3()); } /// @todo Min dPhi, min dR? /// @todo Isolation routines? } //@} // Import Kin namespace into Rivet using namespace Kin; } #endif diff --git a/src/Core/Jet.cc b/src/Core/Jet.cc --- a/src/Core/Jet.cc +++ b/src/Core/Jet.cc @@ -1,185 +1,185 @@ #include "Rivet/Jet.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Tools/ParticleIdUtils.hh" namespace Rivet { Jet& Jet::clear() { _momentum = FourMomentum(); _pseudojet.reset(0,0,0,0); _particles.clear(); return *this; } Jet& Jet::setState(const FourMomentum& mom, const Particles& particles, const Particles& tags) { clear(); _momentum = mom; _pseudojet = fastjet::PseudoJet(mom.px(), mom.py(), mom.pz(), mom.E()); _particles = particles; _tags = tags; return *this; } Jet& Jet::setState(const fastjet::PseudoJet& pj, const Particles& particles, const Particles& tags) { clear(); _pseudojet = pj; _momentum = FourMomentum(pj.e(), pj.px(), pj.py(), pj.pz()); _particles = particles; _tags = tags; // if (_particles.empty()) { // for (const fastjet::PseudoJet pjc : _pseudojet.constituents()) { // // If there is no attached user info, we can't create a meaningful particle, so skip // if (!pjc.has_user_info()) continue; // const RivetFJInfo& fjinfo = pjc.user_info(); // // Don't add ghosts to the particles list // if (fjinfo.isGhost) continue; // // Otherwise construct a Particle from the PseudoJet, preferably from an associated GenParticle // ?if (fjinfo.genParticle != NULL) { // _particles.push_back(Particle(fjinfo.genParticle)); // } else { // if (fjinfo.pid == 0) continue; // skip if there is a null PID entry in the FJ info // const FourMomentum pjcmom(pjc.e(), pjc.px(), pjc.py(), pjc.pz()); // _particles.push_back(Particle(fjinfo.pid, pjcmom)); // } // } // } return *this; } Jet& Jet::setParticles(const Particles& particles) { _particles = particles; return *this; } bool Jet::containsParticle(const Particle& particle) const { const int barcode = particle.genParticle()->barcode(); for (const Particle& p : particles()) { if (p.genParticle()->barcode() == barcode) return true; } return false; } bool Jet::containsParticleId(PdgId pid) const { for (const Particle& p : particles()) { if (p.pid() == pid) return true; } return false; } bool Jet::containsParticleId(const vector& pids) const { for (const Particle& p : particles()) { for (PdgId pid : pids) { if (p.pid() == 
pid) return true; } } return false; } /// @todo Jet::containsMatch(Matcher m) { ... if m(pid) return true; ... } Jet& Jet::transformBy(const LorentzTransform& lt) { _momentum = lt.transform(_momentum); for (Particle& p : _particles) p.transformBy(lt); for (Particle& t : _tags) t.transformBy(lt); _pseudojet.reset(_momentum.px(), _momentum.py(), _momentum.pz(), _momentum.E()); //< lose ClusterSeq etc. return *this; } double Jet::neutralEnergy() const { double e_neutral = 0.0; for (const Particle& p : particles()) { const PdgId pid = p.pid(); - if (PID::threeCharge(pid) == 0) { + if (PID::charge3(pid) == 0) { e_neutral += p.E(); } } return e_neutral; } double Jet::hadronicEnergy() const { double e_hadr = 0.0; for (const Particle& p : particles()) { const PdgId pid = p.pid(); if (PID::isHadron(pid)) { e_hadr += p.E(); } } return e_hadr; } Particles Jet::tags(const Cut& c) const { return filter_select(tags(), c); } Particles Jet::bTags(const Cut& c) const { Particles rtn; for (const Particle& tp : tags()) { if (hasBottom(tp) && c->accept(tp)) rtn.push_back(tp); } return rtn; } Particles Jet::cTags(const Cut& c) const { Particles rtn; for (const Particle& tp : tags()) { /// @todo Is making b and c tags exclusive the right thing to do? if (hasCharm(tp) && !hasBottom(tp) && c->accept(tp)) rtn.push_back(tp); } return rtn; } Particles Jet::tauTags(const Cut& c) const { Particles rtn; for (const Particle& tp : tags()) { if (isTau(tp) && c->accept(tp)) rtn.push_back(tp); } return rtn; } ////////////////////// /// Jets copy constructor from vector Jets::Jets(const std::vector& vjs) : base(vjs) {} /// Jets -> FourMomenta cast/conversion operator Jets::operator FourMomenta () const { // FourMomenta rtn(this->begin(), this->end()); FourMomenta rtn; rtn.reserve(this->size()); for (size_t i = 0; i < this->size(); ++i) rtn.push_back((*this)[i]); return rtn; } /// Jets concatenation operator Jets operator + (const Jets& a, const Jets& b) { Jets rtn(a); rtn += b; return rtn; } /// Allow a Jet to be passed to an ostream. 
std::ostream& operator << (std::ostream& os, const Jet& j) { using std::boolalpha; os << "Jet<" << j.mom()/GeV << " GeV; Nparticles=" << j.size() << "; "; os << "bTag=" << boolalpha << j.bTagged() << ", "; os << "cTag=" << boolalpha << j.cTagged() << ", "; os << "tauTag=" << boolalpha << j.tauTagged() << ">"; return os; } } diff --git a/src/Core/Particle.cc b/src/Core/Particle.cc --- a/src/Core/Particle.cc +++ b/src/Core/Particle.cc @@ -1,310 +1,310 @@ #include "Rivet/Particle.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/ParticleIdUtils.hh" #include "Rivet/Tools/ParticleUtils.hh" namespace Rivet { void Particle::setConstituents(const Particles& cs, bool setmom) { _constituents = cs; if (setmom) _momentum = sum(cs, p4, FourMomentum()); } void Particle::addConstituent(const Particle& c, bool addmom) { _constituents += c; if (addmom) _momentum += c; } void Particle::addConstituents(const Particles& cs, bool addmom) { _constituents += cs; if (addmom) for (const Particle& c : cs) _momentum += c; } Particles Particle::rawConstituents() const { if (!isComposite()) return Particles{*this}; Particles rtn; for (const Particle& p : constituents()) rtn += p.rawConstituents(); return rtn; } Particle& Particle::transformBy(const LorentzTransform& lt) { _momentum = lt.transform(_momentum); return *this; } bool Particle::isVisible() const { // Charged particles are visible - if ( PID::threeCharge(pid()) != 0 ) return true; + if ( PID::charge3(pid()) != 0 ) return true; // Neutral hadrons are visible if ( PID::isHadron(pid()) ) return true; // Photons are visible if ( pid() == PID::PHOTON ) return true; // Gluons are visible (for parton level analyses) if ( pid() == PID::GLUON ) return true; // Everything else is invisible return false; } bool Particle::isStable() const { return genParticle() != NULL && genParticle()->status() == 1 && genParticle()->end_vertex() == NULL; } Particles Particle::ancestors(const Cut& c, bool physical_only) const { Particles rtn; // this case needed protecting against (at least for the latest Herwig... 
not sure why // it didn't show up earlier if (genParticle() == NULL) return rtn; /// @todo Remove this const mess crap when HepMC doesn't suck GenVertexPtr gv = const_cast( genParticle()->production_vertex() ); if (gv == NULL) return rtn; /// @todo Would like to do this, but the range objects are broken // for (const GenParticlePtr gp, gv->particles(HepMC::children)) // rtn += Particle(gp); for (GenVertex::particle_iterator it = gv->particles_begin(HepMC::ancestors); it != gv->particles_end(HepMC::ancestors); ++it) { if (physical_only && (*it)->status() != 1 && (*it)->status() != 2) continue; const Particle p(*it); if (c != Cuts::OPEN && !c->accept(p)) continue; rtn += p; } return rtn; } Particles Particle::parents(const Cut& c) const { Particles rtn; /// @todo Remove this const mess crap when HepMC doesn't suck GenVertexPtr gv = const_cast( genParticle()->production_vertex() ); if (gv == NULL) return rtn; /// @todo Would like to do this, but the range objects are broken // for (const GenParticlePtr gp, gv->particles(HepMC::children)) // rtn += Particle(gp); for (GenVertex::particle_iterator it = gv->particles_begin(HepMC::parents); it != gv->particles_end(HepMC::parents); ++it) { const Particle p(*it); if (c != Cuts::OPEN && !c->accept(p)) continue; rtn += p; } return rtn; } Particles Particle::children(const Cut& c) const { Particles rtn; if (isStable()) return rtn; /// @todo Remove this const mess crap when HepMC doesn't suck GenVertexPtr gv = const_cast( genParticle()->end_vertex() ); if (gv == NULL) return rtn; /// @todo Would like to do this, but the range objects are broken // for (const GenParticlePtr gp, gv->particles(HepMC::children)) // rtn += Particle(gp); for (GenVertex::particle_iterator it = gv->particles_begin(HepMC::children); it != gv->particles_end(HepMC::children); ++it) { const Particle p(*it); if (c != Cuts::OPEN && !c->accept(p)) continue; rtn += p; } return rtn; } /// @todo Insist that the current particle is post-hadronization, otherwise throw an exception? /// @todo Use recursion through replica-avoiding functions to avoid bookkeeping duplicates Particles Particle::allDescendants(const Cut& c, bool remove_duplicates) const { Particles rtn; if (isStable()) return rtn; /// @todo Remove this const mess crap when HepMC doesn't suck GenVertexPtr gv = const_cast( genParticle()->end_vertex() ); if (gv == NULL) return rtn; /// @todo Would like to do this, but the range objects are broken // for (const GenParticlePtr gp, gv->particles(HepMC::descendants)) for (GenVertex::particle_iterator it = gv->particles_begin(HepMC::descendants); it != gv->particles_end(HepMC::descendants); ++it) { const Particle p(*it); if (c != Cuts::OPEN && !c->accept(p)) continue; if (remove_duplicates && (*it)->end_vertex() != NULL) { // size_t n = 0; ///< @todo Only remove 1-to-1 duplicates? bool dup = false; /// @todo Yuck, HepMC for (GenVertex::particle_iterator it2 = (*it)->end_vertex()->particles_begin(HepMC::children); it2 != (*it)->end_vertex()->particles_end(HepMC::children); ++it2) { // n += 1; if (n > 1) break; if ((*it)->pdg_id() == (*it2)->pdg_id()) { dup = true; break; } } if (dup) continue; } rtn += p; } return rtn; } /// @todo Insist that the current particle is post-hadronization, otherwise throw an exception? 
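  // Illustrative usage sketch (hypothetical "tau" Particle; Cuts::pT as used elsewhere in Rivet):
  //   Particles vis = tau.stableDescendants(Cuts::pT > 0.1*GeV);  // stable decay products above 100 MeV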
  Particles Particle::stableDescendants(const Cut& c) const {
    Particles rtn;
    if (isStable()) return rtn;
    /// @todo Remove this const mess crap when HepMC doesn't suck
    GenVertexPtr gv = const_cast<GenVertexPtr>( genParticle()->end_vertex() );
    if (gv == NULL) return rtn;
    /// @todo Would like to do this, but the range objects are broken
    // for (const GenParticlePtr gp, gv->particles(HepMC::descendants))
    for (GenVertex::particle_iterator it = gv->particles_begin(HepMC::descendants); it != gv->particles_end(HepMC::descendants); ++it) {
      // if ((*it)->status() != 1 || (*it)->end_vertex() != NULL) continue;
      const Particle p(*it);
      if (!p.isStable()) continue;
      if (c != Cuts::OPEN && !c->accept(p)) continue;
      rtn += p;
    }
    return rtn;
  }


  double Particle::flightLength() const {
    if (isStable()) return -1;
    if (genParticle() == NULL) return 0;
    if (genParticle()->production_vertex() == NULL) return 0;
    const HepMC::FourVector v1 = genParticle()->production_vertex()->position();
    const HepMC::FourVector v2 = genParticle()->end_vertex()->position();
    return sqrt(sqr(v2.x()-v1.x()) + sqr(v2.y()-v1.y()) + sqr(v2.z()-v1.z()));
  }


  bool Particle::hasParent(PdgId pid) const {
    return hasParentWith(hasPID(pid));
  }

  bool Particle::hasParentWith(const Cut& c) const {
    return hasParentWith([&](const Particle& p){return c->accept(p);});
  }


  bool Particle::hasAncestor(PdgId pid, bool only_physical) const {
    return hasAncestorWith(hasPID(pid), only_physical);
  }

  bool Particle::hasAncestorWith(const Cut& c, bool only_physical) const {
    return hasAncestorWith([&](const Particle& p){return c->accept(p);}, only_physical);
  }


  bool Particle::hasChildWith(const Cut& c) const {
    return hasChildWith([&](const Particle& p){return c->accept(p);});
  }


  bool Particle::hasDescendantWith(const Cut& c, bool remove_duplicates) const {
    return hasDescendantWith([&](const Particle& p){return c->accept(p);}, remove_duplicates);
  }


  bool Particle::hasStableDescendantWith(const Cut& c) const {
    return hasStableDescendantWith([&](const Particle& p){return c->accept(p);});
  }


  bool Particle::fromBottom() const {
    return hasAncestorWith([](const Particle& p){
        return p.genParticle()->status() == 2 && p.isHadron() && p.hasBottom();
      });
  }

  bool Particle::fromCharm() const {
    return hasAncestorWith([](const Particle& p){
        return p.genParticle()->status() == 2 && p.isHadron() && p.hasCharm();
      });
  }


  bool Particle::fromHadron() const {
    return hasAncestorWith([](const Particle& p){
        return p.genParticle()->status() == 2 && p.isHadron();
      });
  }


  bool Particle::fromTau(bool prompt_taus_only) const {
    if (prompt_taus_only && fromHadron()) return false;
    return hasAncestorWith([](const Particle& p){
        return p.genParticle()->status() == 2 && isTau(p);
      });
  }


  bool Particle::fromHadronicTau(bool prompt_taus_only) const {
    return hasAncestorWith([&](const Particle& p){
        return p.genParticle()->status() == 2 && isTau(p) && (!prompt_taus_only || p.isPrompt()) && hasHadronicDecay(p);
      });
  }


  bool Particle::isDirect(bool allow_from_direct_tau, bool allow_from_direct_mu) const {
    if (genParticle() == NULL) return false; // no HepMC connection, give up! Throw UserError exception?
    const GenVertexPtr prodVtx = genParticle()->production_vertex();
    if (prodVtx == NULL) return false; // orphaned particle, has to be assumed false
    const pair<GenParticlePtr, GenParticlePtr> beams = prodVtx->parent_event()->beam_particles();

    /// @todo Would be nicer to be able to write this recursively up the chain, exiting as soon as a parton or string/cluster is seen
    for (const GenParticlePtr ancestor : Rivet::particles(prodVtx, HepMC::ancestors)) {
      const PdgId pid = ancestor->pdg_id();
      if (ancestor->status() != 2) continue; // no non-standard statuses or beams to be used in decision making
      if (ancestor == beams.first || ancestor == beams.second) continue; // PYTHIA6 uses status 2 for beams, I think... (sigh)
      if (PID::isParton(pid)) continue; // PYTHIA6 also uses status 2 for some partons, I think... (sigh)
      if (PID::isHadron(pid)) return false; // direct particles can't be from hadron decays
      if (abs(pid) == PID::TAU && abspid() != PID::TAU && !allow_from_direct_tau) return false; // allow or ban particles from tau decays (permitting tau copies)
      if (abs(pid) == PID::MUON && abspid() != PID::MUON && !allow_from_direct_mu) return false; // allow or ban particles from muon decays (permitting muon copies)
    }
    return true;
  }


  ///////////////////////


  /// Particles copy constructor from vector<Particle>
  Particles::Particles(const std::vector<Particle>& vps) : base(vps) {}

  /// Particles -> FourMomenta cast/conversion operator
  Particles::operator FourMomenta () const {
    // FourMomenta rtn(this->begin(), this->end());
    FourMomenta rtn;
    rtn.reserve(this->size());
    for (size_t i = 0; i < this->size(); ++i) rtn.push_back((*this)[i]);
    return rtn;
  }


  /// Particles concatenation operator
  Particles operator + (const Particles& a, const Particles& b) {
    Particles rtn(a);
    rtn += b;
    return rtn;
  }


  /// Allow a Particle to be passed to an ostream.
  std::ostream& operator << (std::ostream& os, const Particle& p) {
    string pname;
    try {
      pname = PID::toParticleName(p.pid());
    } catch (...) {
      pname = "PID=" + to_str(p.pid());
    }
    os << "Particle<" << pname << " @ " << p.momentum()/GeV << " GeV>";
    return os;
  }


  /// Allow ParticlePair to be passed to an ostream.
  std::ostream& operator << (std::ostream& os, const ParticlePair& pp) {
    os << "[" << pp.first << ", " << pp.second << "]";
    return os;
  }


}
diff --git a/src/Projections/ChargedFinalState.cc b/src/Projections/ChargedFinalState.cc
--- a/src/Projections/ChargedFinalState.cc
+++ b/src/Projections/ChargedFinalState.cc
@@ -1,48 +1,48 @@
// -*- C++ -*-
#include "Rivet/Projections/ChargedFinalState.hh"

namespace Rivet {


  ChargedFinalState::ChargedFinalState(const FinalState& fsp) {
    setName("ChargedFinalState");
    declare(fsp, "FS");
  }

  ChargedFinalState::ChargedFinalState(const Cut& c) {
    setName("ChargedFinalState");
    declare(FinalState(c), "FS");
  }

  ChargedFinalState::ChargedFinalState(double mineta, double maxeta, double minpt) {
    setName("ChargedFinalState");
    declare(FinalState(mineta, maxeta, minpt), "FS");
  }

  CmpState ChargedFinalState::compare(const Projection& p) const {
    return mkNamedPCmp(p, "FS");
  }

}

namespace {
  inline bool chargedParticleFilter(const Rivet::Particle& p) {
-    return Rivet::PID::threeCharge(p.pdgId()) == 0;
+    return Rivet::PID::charge3(p.pdgId()) == 0;
  }
}

namespace Rivet {

  void ChargedFinalState::project(const Event& e) {
    const FinalState& fs = applyProjection<FinalState>(e, "FS");
    _theParticles.clear();
    std::remove_copy_if(fs.particles().begin(), fs.particles().end(),
                        std::back_inserter(_theParticles), chargedParticleFilter);
    MSG_DEBUG("Number of charged final-state particles = " << _theParticles.size());
    if (getLog().isActive(Log::TRACE)) {
      for (vector<Particle>::iterator p = _theParticles.begin(); p != _theParticles.end(); ++p) {
-        MSG_TRACE("Selected: " << p->pdgId() << ", charge = " << PID::threeCharge(p->pdgId())/3.0);
+        MSG_TRACE("Selected: " << p->pdgId() << ", charge = " << PID::charge3(p->pdgId())/3.0);
      }
    }
  }

}