diff --git a/analyses/pluginATLAS/ATLAS_2015_I1386475.cc b/analyses/pluginATLAS/ATLAS_2015_I1386475.cc
--- a/analyses/pluginATLAS/ATLAS_2015_I1386475.cc
+++ b/analyses/pluginATLAS/ATLAS_2015_I1386475.cc
@@ -1,86 +1,83 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Tools/AtlasCommon.hh"

 namespace Rivet {


   /// @brief Add a short analysis description here
   class ATLAS_2015_I1386475 : public Analysis {
   public:

     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2015_I1386475);


     /// Book histograms and initialise projections before the run
     void init() {
       // The centrality projection.
       declareCentrality(ATLAS::SumET_PB_Centrality(), "ATLAS_pPb_Calib", "SumETPb", "CENT");
       // The trigger projection.
       declare(ATLAS::MinBiasTrigger(), "Trigger");
       // The particles to be analysed.
       declare(ChargedFinalState(Cuts::eta > -2.7 && Cuts::eta < 2.7 && Cuts::pT > 0.1*GeV), "CFS");
-      
+
       // The centrality bins' upper edges.
       centralityBins = {90., 60., 40., 30., 20., 10., 5., 1.};
       for (int i = 0; i < 8; ++i) {
         book(histEta[centralityBins[i]], 2, 1, i + 1);
         sow[centralityBins[i]] = 0.;
       }
     }


     /// Perform the per-event analysis
     void analyze(const Event& event) {
-      const double weight = event.weight();
-
       // Apply event triggers.
       if ( !apply<ATLAS::MinBiasTrigger>(event, "Trigger")() ) vetoEvent;

       // We must have direct acces to the centrality projection.
-      const CentralityProjection& cent = 
+      const CentralityProjection& cent =
         apply<CentralityProjection>(event,"CENT");
       double c = cent();

       // Find the correct centrality histogram
       auto hItr = histEta.upper_bound(c);
       if (hItr == histEta.end()) return;
       // Find the correct sow.
       auto sItr = sow.upper_bound(c);
       if (sItr == sow.end()) return;
-      sItr->second += weight;
+      sItr->second += 1;
       for ( const auto &p : apply<ChargedFinalState>(event,"CFS").particles() )
-        hItr->second->fill(p.eta(), weight);
+        hItr->second->fill(p.eta());
     }
-    
+
     /// Finalize
     void finalize() {
-      // Scale by the inverse sum of event weights in each centrality
-      // bin.
+      // Scale by the inverse sum of event weights in each centrality bin
       for (int i = 0; i < 8; ++i)
         histEta[centralityBins[i]]->scaleW(1./sow[centralityBins[i]]);
     }


   private:

     /// The histograms binned in centrality.
     vector<double> centralityBins;
     map<double, Histo1DPtr> histEta;
     map<double, double> sow;

   };


   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(ATLAS_2015_I1386475);

 }
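
The key bookkeeping device in the analysis above is a std::map keyed on the centrality bins' upper edges, with std::map::upper_bound selecting the first edge above the event's centrality; the patch itself merely replaces the explicit event.weight() handling with unit fills, leaving event weighting to the framework. Below is a minimal standalone sketch of that lookup pattern, plain C++ rather than Rivet; the bin edges are copied from the analysis, while the sample centrality values are invented for illustration.

    #include <iostream>
    #include <map>

    int main() {
      // Counters keyed on the centrality bins' upper edges; std::map keeps
      // the keys sorted ascending regardless of insertion order.
      std::map<double, int> counts;
      for (double edge : {90., 60., 40., 30., 20., 10., 5., 1.}) counts[edge] = 0;

      // Invented centrality values standing in for cent() results.
      for (double c : {0.5, 3.0, 12.0, 55.0, 95.0}) {
        auto it = counts.upper_bound(c);   // first edge strictly greater than c
        if (it == counts.end()) continue;  // beyond the last edge: discarded
        it->second += 1;                   // unit weight, as in the patched analyze()
      }

      for (const auto& kv : counts)
        std::cout << "edge " << kv.first << ": " << kv.second << " event(s)\n";
      return 0;
    }

As in the analysis, an event whose centrality lies above the largest edge (here, above 90) falls off the end of the map and is dropped by the early return.
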
diff --git a/analyses/pluginCMS/CMS_2013_I1223519.cc b/analyses/pluginCMS/CMS_2013_I1223519.cc
--- a/analyses/pluginCMS/CMS_2013_I1223519.cc
+++ b/analyses/pluginCMS/CMS_2013_I1223519.cc
@@ -1,249 +1,249 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalStates.hh"
 #include "Rivet/Projections/FastJets.hh"
 #include "Rivet/Projections/MissingMomentum.hh"
 #include "Rivet/Projections/Smearing.hh"
 #include <bitset>

 namespace Rivet {


   /// @brief Add a short analysis description here
   class CMS_2013_I1223519 : public Analysis {
   public:

     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1223519);


     /// @name Analysis methods
     //@{

     /// Book histograms and initialise projections before the run
     void init() {

       // Initialise and register projections
       FinalState calofs(Cuts::abseta < 5.0);
       declare(calofs, "Clusters");

       MissingMomentum mm(calofs);
       declare(mm, "TruthMET");
       declare(SmearedMET(mm, MET_SMEAR_CMS_RUN2), "MET");

       FastJets fj(calofs, FastJets::ANTIKT, 0.5);
       declare(fj, "TruthJets");
       declare(SmearedJets(fj, JET_SMEAR_CMS_RUN2, [](const Jet& j) {
             if (j.abseta() > 2.4) return 0.;
             return j.bTagged() ? 0.65 : 0.01; }), "Jets"); ///< @note Charm mistag and exact b-tag eff not given

       FinalState ys(Cuts::abspid == PID::PHOTON && Cuts::abseta < 5.0);
       declare(ys, "TruthPhotons");
       declare(SmearedParticles(ys, PHOTON_EFF_CMS_RUN2 /*, PHOTON_SMEAR_CMS_RUN2 */), "Photons");

       FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5);
       declare(es, "TruthElectrons");
       declare(SmearedParticles(es, ELECTRON_EFF_CMS_RUN2, ELECTRON_SMEAR_CMS_RUN2), "Electrons");

       FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4);
       declare(mus, "TruthMuons");
       declare(SmearedParticles(mus, MUON_EFF_CMS_RUN2, MUON_SMEAR_CMS_RUN2), "Muons");

       ChargedFinalState cfs(Cuts::abseta < 2.5);
       declare(cfs, "TruthTracks");
       declare(SmearedParticles(cfs, TRK_EFF_CMS_RUN2), "Tracks");

       // Book histograms
       book(_h_alphaT23, "alphaT23", 15, 0, 3);
       book(_h_alphaT4 , "alphaT4", 15, 0, 3);
       /// @todo Add HT histograms

       // Book counters
       _h_srcounters.resize(8*7 + 3);
       for (size_t inj = 0; inj < 2; ++inj) {
         const size_t njmax = inj + 3;
         for (size_t nb = 0; nb < njmax; ++nb) {
           for (size_t iht = 0; iht < 8; ++iht) {
             const size_t i = 8 * ((inj == 0 ? 0 : 3) + nb) + iht;
             book(_h_srcounters[i], "srcount_j" + toString(njmax) + "_b" + toString(nb) + "_ht" + toString(iht+1));
           }
         }
       }
       // Special nj >= 4, nb >= 4 bins
       for (size_t iht = 0; iht < 3; ++iht) {
         book(_h_srcounters[8*7 + iht], "srcount_j4_b4_ht" + toString(iht+1));
       }

     }


     /// Perform the per-event analysis
     void analyze(const Event& event) {

       // Get baseline photons, electrons & muons
       Particles photons = apply<ParticleFinder>(event, "Photons").particles(Cuts::pT > 25*GeV);
       Particles elecs = apply<ParticleFinder>(event, "Electrons").particles(Cuts::pT > 10*GeV);
       Particles muons = apply<ParticleFinder>(event, "Muons").particles(Cuts::pT > 10*GeV);

       // Electron/muon isolation (guesswork/copied from other CMS analysis -- paper is unspecific)
       const Particles calofs = apply<ParticleFinder>(event, "Clusters").particles();
       ifilter_discard(photons, [&](const Particle& y) {
           double ptsum = -y.pT();
           for (const Particle& p : calofs)
             if (deltaR(p,y) < 0.3) ptsum += p.pT();
           return ptsum / y.pT() > 0.1;
         });
       ifilter_discard(elecs, [&](const Particle& e) {
           double ptsum = -e.pT();
           for (const Particle& p : calofs)
             if (deltaR(p,e) < 0.3) ptsum += p.pT();
           return ptsum / e.pT() > 0.1;
         });
       ifilter_discard(muons, [&](const Particle& m) {
           double ptsum = -m.pT();
           for (const Particle& p : calofs)
             if (deltaR(p,m) < 0.3) ptsum += p.pT();
           return ptsum / m.pT() > 0.2;
         });

       // Veto the event if there are any remaining baseline photons or leptons
       if (!photons.empty()) vetoEvent;
       if (!elecs.empty()) vetoEvent;
       if (!muons.empty()) vetoEvent;

       // Get jets and apply jet-based event-selection cuts
       const JetAlg& jetproj = apply<JetAlg>(event, "Jets");
       const Jets alljets = jetproj.jetsByPt(Cuts::abseta < 3.0 && Cuts::Et > 37*GeV); //< most inclusive jets requirement
       if (filter_select(alljets, Cuts::Et > 73*GeV).size() < 2) vetoEvent; //< most inclusive lead jets requirement

       // Filter jets into different Et requirements & compute corresponding HTs
       /// @note It's not clear if different HTs are used to choose the HT bins
       const Jets jets37 = filter_select(alljets, Cuts::Et > 37*GeV);
       const Jets jets43 = filter_select(jets37, Cuts::Et > 43*GeV);
       const Jets jets50 = filter_select(jets43, Cuts::Et > 50*GeV);
       const double ht37 = sum(jets37, Et, 0.0);
       const double ht43 = sum(jets43, Et, 0.0);
       const double ht50 = sum(jets50, Et, 0.0);

       // Find the relevant HT bin and apply leading jet event-selection cuts
       static const vector<double> htcuts = { /* 275., 325., */ 375., 475., 575., 675., 775., 875.}; //< comment to avoid jets50 "fall-down"
       const int iht = inRange(ht37, 275*GeV, 325*GeV) ? 0 : inRange(ht43, 325*GeV, 375*GeV) ? 1 : (2+binIndex(ht50, htcuts, true));
       MSG_TRACE("HT = {" << ht37 << ", " << ht43 << ", " << ht50 << "} => IHT = " << iht);
       if (iht < 0) vetoEvent;
       if (iht == 1 && filter_select(jets43, Cuts::Et > 78*GeV).size() < 2) vetoEvent;
       if (iht >= 2 && filter_select(jets50, Cuts::Et > 100*GeV).size() < 2) vetoEvent;

       // Create references for uniform access to relevant set of jets & HT
       const double etcut = iht == 0 ? 37. : iht == 1 ? 43. : 50.;
       const double& ht = iht == 0 ? ht37 : iht == 1 ? ht43 : ht50;
       const Jets& jets = iht == 0 ? jets37 : iht == 1 ? jets43 : jets50;
       if (!jetproj.jets(Cuts::abseta > 3 && Cuts::Et > etcut*GeV).empty()) vetoEvent;
       const size_t nj = jets.size();
       const size_t nb = count_if(jets.begin(), jets.end(), [](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); });

       // Compute HTmiss = pT of 4-vector sum of jet momenta
       const FourMomentum jsum = sum(jets, mom, FourMomentum());
       const double htmiss = jsum.pT();

       // Require HTmiss / ETmiss < 1.25
       const double etmiss = apply<SmearedMET>(event, "MET").met();
       if (htmiss/etmiss > 1.25) vetoEvent;

       // Compute DeltaHT = minimum difference of "dijet" ETs, i.e. max(|1+2-3|, |1+3-2|, |2+3-1|)
       double deltaht = -1;
       vector<double> jetets; transform(jets, jetets, Et);
       for (int i = 1; i < (1 << (jetets.size()-1)); ++i) { // count from 1 to 2**N-1, i.e. through all heterogeneous bitmasks with MSB(2**N)==0
-        const bitset<10> bits(i); /// @warning There'd better not be more than 10 jets...
+        const std::bitset<10> bits(i); /// @warning There'd better not be more than 10 jets...
         const double htdiff = partition_diff(bits, jetets);
         // MSG_INFO(bits.to_string() << " => " << htdiff);
         if (deltaht < 0 || htdiff < deltaht) deltaht = htdiff;
       }
       MSG_DEBUG("dHT_bitmask = " << deltaht);

       // Cross-check calculation in 2- and 3-jet cases
       // if (jets.size() == 2) {
       //   MSG_INFO("dHT2 = " << fabs(jets[0].Et() - jets[1].Et()));
       // } else if (jets.size() == 3) {
       //   double deltaht_01_2 = fabs(jets[0].Et()+jets[1].Et()-jets[2].Et());
       //   double deltaht_02_1 = fabs(jets[0].Et()+jets[2].Et()-jets[1].Et());
       //   double deltaht_12_0 = fabs(jets[1].Et()+jets[2].Et()-jets[0].Et());
       //   MSG_INFO("dHT3 = " << min({deltaht_01_2, deltaht_02_1, deltaht_12_0}));
       // }

       // Compute alphaT from the above
       double alphaT = fabs(0.5*((ht-deltaht)/(sqrt((ht*ht)-(htmiss*htmiss)))));
       if (alphaT < 0.55) vetoEvent;

       /// @todo Need to include trigger efficiency sampling or weighting?

       // Fill histograms
       const double weight = 1.0;
       const size_t inj = nj < 4 ? 0 : 1;
       const size_t inb = nb < 4 ? nb : 4;
       if (iht >= 2) (inj == 0 ? _h_alphaT23 : _h_alphaT4)->fill(alphaT, weight);

       // Fill the appropriate counter -- after working out the irregular SR bin index! *sigh*
       size_t i = 8 * ((inj == 0 ? 0 : 3) + inb) + iht;
       if (inj == 1 && inb == 4) i = 8*7 + (iht < 3 ? iht : 2);
       MSG_INFO("inj = " << inj << ", inb = " << inb << ", i = " << i);
       _h_srcounters[i]->fill(weight);

     }


     /// Normalise histograms etc., after the run
     void finalize() {
       const double sf = crossSection()/femtobarn*11.7/sumOfWeights();
       scale({_h_alphaT23,_h_alphaT4}, sf);
       for (size_t i = 0; i < 8*7+3; ++i)
         scale(_h_srcounters[i], sf);
     }

     //@}


     /// @name Utility functions for partitioning jet pTs into two groups and summing/diffing them
     //@{

     /// Sum the given values into two subsets according to the provided bitmask
     template <size_t N>
-    pair<double, double> partition_sum(const bitset<N>& mask, const vector<double>& vals) const {
+    pair<double, double> partition_sum(const std::bitset<N>& mask, const vector<double>& vals) const {
       pair<double, double> rtn(0., 0.);
       for (size_t i = 0; i < vals.size(); ++i) {
         (!mask[vals.size()-1-i] ? rtn.first : rtn.second) += vals[i];
       }
       return rtn;
     }

     /// Return the difference between summed subsets according to the provided bitmask
     template <size_t N>
-    double partition_diff(const bitset<N>& mask, const vector<double>& vals) const {
+    double partition_diff(const std::bitset<N>& mask, const vector<double>& vals) const {
       const pair<double, double> sums = partition_sum(mask, vals);
       const double diff = fabs(sums.first - sums.second);
       MSG_TRACE(mask.to_string() << ": " << sums.first << "/" << sums.second << " => " << diff);
       return diff;
     }

     //@}


     /// @name Histograms
     //@{
     Histo1DPtr _h_alphaT23, _h_alphaT4;
     vector<CounterPtr> _h_srcounters;
     //@}

   };


   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(CMS_2013_I1223519);

 }
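
The core of the alphaT construction above is the DeltaHT scan: every two-way partition of the jet ETs is enumerated as a bitmask whose most significant bit is pinned to zero (a split and its complement give the same absolute difference, so half the masks suffice), and the smallest difference defines DeltaHT; alphaT then follows as 0.5*(HT - DeltaHT)/sqrt(HT^2 - HTmiss^2). Here is a self-contained sketch of the same arithmetic, plain C++ rather than Rivet, with invented jet ETs and HTmiss:

    #include <bitset>
    #include <cmath>
    #include <iostream>
    #include <vector>

    int main() {
      const std::vector<double> jetets = {120., 80., 55.};  // invented jet ETs [GeV]
      const double htmiss = 30.;                            // invented HTmiss [GeV]

      double ht = 0.;
      for (double et : jetets) ht += et;

      // Scan bitmasks 1 .. 2^(N-1)-1: each unordered two-way split exactly once.
      double deltaht = -1.;
      for (unsigned long i = 1; i < (1ul << (jetets.size()-1)); ++i) {
        const std::bitset<10> bits(i);  // same 10-jet cap as the analysis
        double sum0 = 0., sum1 = 0.;
        for (size_t j = 0; j < jetets.size(); ++j)
          (bits[jetets.size()-1-j] ? sum1 : sum0) += jetets[j];
        const double diff = std::fabs(sum0 - sum1);
        if (deltaht < 0 || diff < deltaht) deltaht = diff;
      }

      const double alphaT = 0.5 * (ht - deltaht) / std::sqrt(ht*ht - htmiss*htmiss);
      std::cout << "DeltaHT = " << deltaht << ", alphaT = " << alphaT << "\n";
      return 0;
    }

For a perfectly balanced dijet system with no genuine missing energy alphaT tends to 0.5, while real MET pushes it above that, which is why the analysis's cut at 0.55 suppresses the QCD multijet background.
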
diff --git a/analyses/pluginCMS/CMS_2017_I1594909.cc b/analyses/pluginCMS/CMS_2017_I1594909.cc
--- a/analyses/pluginCMS/CMS_2017_I1594909.cc
+++ b/analyses/pluginCMS/CMS_2017_I1594909.cc
@@ -1,265 +1,265 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/FastJets.hh"
 #include "Rivet/Projections/ChargedFinalState.hh"
 #include "Rivet/Projections/VisibleFinalState.hh"
 #include "Rivet/Projections/MissingMomentum.hh"
 #include "Rivet/Projections/SmearedParticles.hh"
 #include "Rivet/Projections/SmearedJets.hh"
 #include "Rivet/Projections/SmearedMET.hh"
 #include "Rivet/Tools/Cutflow.hh"
 #include <tuple>

 namespace Rivet {


   /// CMS search for SUSY with multijet + MET signatures in 36/fb of 13 TeV pp data
   class CMS_2017_I1594909 : public Analysis {
   public:

     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2017_I1594909);


     /// @name Analysis methods
     //@{

     /// Book histograms and initialise projections before the run
     void init() {

       VisibleFinalState pfall;
       declare(pfall, "PFAll");

       ChargedFinalState pfchg(Cuts::abseta < 2.5);
       declare(pfchg, "PFChg");

       FastJets jets(FinalState(Cuts::abseta < 4.9), FastJets::ANTIKT, 0.4);
       SmearedJets recojets(jets, JET_SMEAR_CMS_RUN2,
                            [](const Jet& j){ return j.bTagged() ? 0.55 : j.cTagged() ? 0.12 : 0.016; });
       declare(recojets, "Jets");

       FinalState electrons(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5);
       SmearedParticles recoelectrons(electrons, ELECTRON_EFF_CMS_RUN2);
       declare(recoelectrons, "Electrons");

       FinalState muons(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4);
       SmearedParticles recomuons(muons, MUON_EFF_CMS_RUN2);
       declare(recomuons, "Muons");

       VisibleFinalState calofs(Cuts::abseta < 4.9 && Cuts::abspid != PID::MUON);
       MissingMomentum met(calofs);
       SmearedMET recomet(met, MET_SMEAR_CMS_RUN2);
       declare(recomet, "MET");

       // Book counters, into a map of 3 indices since the global index is not obvious to calculate
       size_t i = 0;
       for (int j = 1; j <= 5; ++j) {
         for (int b = 1; b <= 4; ++b) {
           if (j == 1 && b == 4) continue;
           for (int k = 1; k <= 10; ++k) {
             if (j > 3 && (k == 1 || k == 4)) continue;
             stringstream s; s << "count_" << (i+1); // << "_" << j << b << k;
-            book(_counts[make_tuple(j,b,k)], s.str());
+            book(_counts[std::make_tuple(j,b,k)], s.str());
             i += 1;
           }
         }
       }
       MSG_DEBUG("Booked " << i << " signal regions (should be 174)");

       // Aggregate SR counters
       for (size_t i = 0; i < 12; ++i)
         book(_counts_agg[i], "count_agg_" + toString(i+1));

       // Book cut-flow
       _flow = Cutflow("Presel", {"Njet>=2", "HT>300", "HTmiss>300", "Nmuon=0", "Nmuisotrk=0", "Nelec=0", "Nelisotrk=0",
             "Nhadisotrk=0", "dPhi_miss,j1>0.5", "dPhi_miss,j2>0.5", "dPhi_miss,j3>0.3", "dPhi_miss,j4>0.3" });

     }


     /// Perform the per-event analysis
     void analyze(const Event& event) {
       _flow.fillinit();

       // Find leptons and isolation particles
       const Particles elecs = apply<ParticleFinder>(event, "Electrons").particlesByPt();
       const Particles mus = apply<ParticleFinder>(event, "Muons").particlesByPt();
       const Particles pfall = apply<ParticleFinder>(event, "PFAll").particlesByPt();
       const Particles pfiso = filter_select(pfall, [](const Particle& p){ return p.isHadron() || p.pid() == PID::PHOTON; });

       // Find isolated leptons
       const Particles isoleps = filter_select(elecs+mus, [&](const Particle& l){
           const double dR = l.pT() < 50*GeV ? 0.2 : l.pT() < 200*GeV ? 10*GeV/l.pT() : 0.05;
           const double sumpt = sum(filter_select(pfiso, deltaRLess(l, dR)), Kin::pT, 0.0);
           return sumpt/l.pT() < (l.abspid() == PID::ELECTRON ? 0.1 : 0.2); //< different I criteria for e and mu
         });

       // Find other isolated tracks
       const Particles pfchg = apply<ParticleFinder>(event, "PFChg").particlesByPt();
       const Particles isochgs = filter_select(pfchg, [&](const Particle& t){
           if (t.abseta() > 2.4) return false;
           if (any(isoleps, deltaRLess(t, 0.01))) return false; //< don't count isolated leptons here
           const double sumpt = sum(filter_select(pfchg, deltaRLess(t, 0.3)), Kin::pT, -t.pT());
           return sumpt/t.pT() < ((t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 0.2 : 0.1);
         });

       // Find and isolate jets
       const Jets jets = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV);
       const Jets isojets = filter_select(jets, Cuts::abseta < 2.4); //< @todo Isolation from leptons?!?
       const int njets = isojets.size();
       const Jets isobjets = filter_select(isojets, hasBTag());
       const int nbjets = isobjets.size();
       MSG_DEBUG("Njets = " << jets.size() << ", Nisojets = " << njets << ", Nbjets = " << nbjets);

       // Calculate HT, HTmiss, and pTmiss quantities
       const double ht = sum(jets, Kin::pT, 0.0);
       const Vector3 vhtmiss = -sum(jets, pTvec, Vector3());
       const double htmiss = vhtmiss.perp();
       const Vector3& vptmiss = -apply<SmearedMET>(event, "MET").vectorEt();
       const double ptmiss = vptmiss.perp();
       MSG_DEBUG("HT = " << ht/GeV << " GeV, HTmiss = " << htmiss/GeV << " GeV");


       /////////////////////////////////////


       // Event selection

       // Njet cut
       if (njets < 2) vetoEvent;
       _flow.fill(1);

       // HT cut
       if (ht < 300*GeV) vetoEvent;
       _flow.fill(2);

       // HTmiss cut
       if (htmiss < 300*GeV) vetoEvent;
       _flow.fill(3);

       // Isolated leptons cut
       if (!filter_select(isoleps, Cuts::pT > 10*GeV).empty()) vetoEvent;

       // Isolated tracks cut
       for (const Particle& t : isochgs) {
         const double mT = sqrt(2*t.pT()*ptmiss * (1 - cos(deltaPhi(t, vptmiss))) );
         if (mT < 100*GeV) continue;
         const double pTmax = (t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 5*GeV : 10*GeV;
         if (t.pT() > pTmax) vetoEvent;
       }

       //
       // // Inefficiently separated version of isolation cuts for detailed cutflow debugging
       // // Muon cut
       // if (!filter_select(isoleps, Cuts::pT > 10*GeV && Cuts::abspid == PID::MUON).empty()) vetoEvent;
       // _flow.fill(4);
       // // Muon isotrk cut
       // for (const Particle& t : filter_select(isochgs, Cuts::abspid == PID::MUON)) {
       //   const double mT = sqrt(2*t.pT()*ptmiss * (1 - cos(deltaPhi(t, vptmiss))) );
       //   if (mT > 100*GeV && t.pT() > 5*GeV) vetoEvent;
       // }
       // _flow.fill(5);
       // // Electron cut
       // if (!filter_select(isoleps, Cuts::pT > 10*GeV && Cuts::abspid == PID::ELECTRON).empty()) vetoEvent;
       // _flow.fill(6);
       // // Electron isotrk cut
       // for (const Particle& t : filter_select(isochgs, Cuts::abspid == PID::ELECTRON)) {
       //   const double mT = sqrt(2*t.pT()*ptmiss * (1 - cos(deltaPhi(t, vptmiss))) );
       //   if (mT > 100*GeV && t.pT() > 5*GeV) vetoEvent;
       // }
       // _flow.fill(7);
       // // Hadron isotrk cut
       // for (const Particle& t : filter_select(isochgs, Cuts::abspid != PID::ELECTRON && Cuts::abspid != PID::MUON)) {
       //   const double mT = sqrt(2*t.pT()*ptmiss * (1 - cos(deltaPhi(t, vptmiss))) );
       //   if (mT > 100*GeV && t.pT() > 10*GeV) vetoEvent;
       // }
       _flow.fill(8);

       // dPhi(jet,HTmiss) cuts
       if (deltaPhi(vhtmiss, isojets[0]) < 0.5) vetoEvent;
       _flow.fill(9);
       if (deltaPhi(vhtmiss, isojets[1]) < 0.5) vetoEvent;
       _flow.fill(10);
       if (njets >= 3 && deltaPhi(vhtmiss, isojets[2]) < 0.3) vetoEvent;
       _flow.fill(11);
       if (njets >= 4 && deltaPhi(vhtmiss, isojets[3]) < 0.3) vetoEvent;
       _flow.fill(12);


       /////////////////////////////////////


       // Find SR index and fill counter
       const double w = 1.0;
       const int idx_j = binIndex(njets, vector<int>{2,3,5,7,9}, true);
       const int idx_b = binIndex(nbjets, vector<int>{0,1,2,3}, true);
       int idx_k = -1;
       if (inRange(htmiss/GeV, 300, 350)) {
         idx_k = ht < 500*GeV ? 1 : ht < 1000*GeV ? 2 : 3;
       } else if (inRange(htmiss/GeV, 350, 500) && ht > 350*GeV) {
         idx_k = ht < 500*GeV ? 4 : ht < 1000*GeV ? 5 : 6;
       } else if (inRange(htmiss/GeV, 500, 750) && ht > 500*GeV) {
         idx_k = ht < 1000*GeV ? 7 : 8;
       } else if (htmiss/GeV > 750 && ht > 750*GeV) {
         idx_k = ht < 1500*GeV ? 9 : 10;
       }
       // Fill via 3-tuple index
       if (idx_j >= 0 && idx_b >= 0 && idx_k >= 0) {
-        const auto idx = make_tuple(idx_j+1,idx_b+1,idx_k);
+        const auto idx = std::make_tuple(idx_j+1,idx_b+1,idx_k);
         if (has_key(_counts, idx)) _counts[idx]->fill(w);
       }


       /////////////////////////////////////


       // Aggregate SRs
       //   Region  Njet  Nb-jet  HT [GeV]  HTmiss [GeV]  Parton multiplicity  Heavy flavor ?  ∆m
       if (njets >= 2 && nbjets == 0 && ht >= 500*GeV && htmiss >= 500*GeV) _counts_agg[0]->fill(w);
       if (njets >= 3 && nbjets == 0 && ht >= 1500*GeV && htmiss >= 750*GeV) _counts_agg[1]->fill(w);
       if (njets >= 5 && nbjets == 0 && ht >= 500*GeV && htmiss >= 500*GeV) _counts_agg[2]->fill(w);
       if (njets >= 5 && nbjets == 0 && ht >= 1500*GeV && htmiss >= 750*GeV) _counts_agg[3]->fill(w);
       if (njets >= 9 && nbjets == 0 && ht >= 1500*GeV && htmiss >= 750*GeV) _counts_agg[4]->fill(w);
       if (njets >= 2 && nbjets >= 2 && ht >= 500*GeV && htmiss >= 500*GeV) _counts_agg[5]->fill(w);
       if (njets >= 3 && nbjets >= 1 && ht >= 750*GeV && htmiss >= 750*GeV) _counts_agg[6]->fill(w);
       if (njets >= 5 && nbjets >= 3 && ht >= 500*GeV && htmiss >= 500*GeV) _counts_agg[7]->fill(w);
       if (njets >= 5 && nbjets >= 2 && ht >= 1500*GeV && htmiss >= 750*GeV) _counts_agg[8]->fill(w);
       if (njets >= 9 && nbjets >= 3 && ht >= 750*GeV && htmiss >= 750*GeV) _counts_agg[9]->fill(w);
       if (njets >= 7 && nbjets >= 1 && ht >= 300*GeV && htmiss >= 300*GeV) _counts_agg[10]->fill(w);
       if (njets >= 5 && nbjets >= 1 && ht >= 750*GeV && htmiss >= 750*GeV) _counts_agg[11]->fill(w);

     }


     /// Normalise histograms etc., after the run
     void finalize() {
       const double norm = 35.9*crossSection()/femtobarn;
       const double sf = norm/sumOfWeights();
       for (auto& idx_cptr : _counts) scale(idx_cptr.second, sf);
       for (CounterPtr& cptr : _counts_agg) scale(cptr, sf);
       _flow.scale(sf);
       MSG_INFO("CUTFLOWS:\n\n" << _flow);
     }

     //@}


   private:

     Cutflow _flow;

     map<tuple<int,int,int>, CounterPtr> _counts;

     CounterPtr _counts_agg[12];

   };


   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(CMS_2017_I1594909);

 }
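
Beyond the std:: qualification, the bookkeeping pattern above is worth spelling out: the 174 signal regions form an irregular (Njet, Nb, kinematic) grid, so the counters are keyed on a std::tuple in a map rather than packed into a flat array with an error-prone index formula. Below is a standalone sketch of the booking and guarded filling, plain C++ rather than Rivet:

    #include <iostream>
    #include <map>
    #include <tuple>

    int main() {
      std::map<std::tuple<int,int,int>, int> counts;

      // Book only the valid (j, b, k) combinations, mirroring init() above.
      size_t booked = 0;
      for (int j = 1; j <= 5; ++j) {
        for (int b = 1; b <= 4; ++b) {
          if (j == 1 && b == 4) continue;               // no 4-b bin at lowest Njet
          for (int k = 1; k <= 10; ++k) {
            if (j > 3 && (k == 1 || k == 4)) continue;  // bins absent at high Njet
            counts[std::make_tuple(j, b, k)] = 0;
            ++booked;
          }
        }
      }
      std::cout << booked << " signal regions booked\n";  // prints 174

      // Guarded fill: an unbooked combination is simply ignored,
      // like the has_key() check in analyze() above.
      const auto idx = std::make_tuple(2, 1, 5);
      if (counts.count(idx)) counts[idx] += 1;
      return 0;
    }
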
diff --git a/include/Rivet/Projections/ConstLossyFinalState.hh b/include/Rivet/Projections/ConstLossyFinalState.hh
--- a/include/Rivet/Projections/ConstLossyFinalState.hh
+++ b/include/Rivet/Projections/ConstLossyFinalState.hh
@@ -1,74 +1,72 @@
 // -*- C++ -*-
 #ifndef RIVET_ConstLossyFinalState_HH
 #define RIVET_ConstLossyFinalState_HH

-#include "Rivet/Tools/Logging.hh"
 #include "Rivet/Config/RivetCommon.hh"
-#include "Rivet/Particle.hh"
-#include "Rivet/Event.hh"
 #include "Rivet/Projection.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/LossyFinalState.hh"
+#include "Rivet/Tools/Random.hh"

 namespace Rivet {


   /// Functor used to implement constant random lossiness.
   class ConstRandomFilter {
   public:

     ConstRandomFilter(double lossFraction)
       : _lossFraction(lossFraction)
     {
       assert(_lossFraction >= 0);
     }

     // If operator() returns true, particle is deleted ("lost")
     bool operator()(const Particle&) {
       return rand01() < _lossFraction;
     }

     CmpState compare(const ConstRandomFilter& other) const {
       return cmp(_lossFraction, other._lossFraction);
     }

   private:

     double _lossFraction;

   };


   /// @brief Randomly lose a constant fraction of particles.
   class ConstLossyFinalState : public LossyFinalState<ConstRandomFilter> {
   public:

     /// @name Constructors
     //@{

     /// Constructor from a FinalState.
     ConstLossyFinalState(const FinalState& fsp, double lossfraction)
       : LossyFinalState<ConstRandomFilter>(fsp, ConstRandomFilter(lossfraction))
     {
       setName("ConstLossyFinalState");
     }

     /// Stand-alone constructor. Initialises the base FinalState projection.
     ConstLossyFinalState(double lossfraction, const Cut& c=Cuts::open())
       : LossyFinalState<ConstRandomFilter>(ConstRandomFilter(lossfraction), c)
     {
       setName("ConstLossyFinalState");
     }

     /// Clone on the heap.
     DEFAULT_RIVET_PROJ_CLONE(ConstLossyFinalState);

     //@}

   };


 }

 #endif
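
For context, here is a hedged sketch of how an analysis might use this projection, dropping a flat 10% of final-state particles; the analysis name LOSSY_DEMO and the eta cut are invented for illustration, while the constructor signature and apply<> call follow the header above and standard Rivet conventions.

    #include "Rivet/Analysis.hh"
    #include "Rivet/Projections/ConstLossyFinalState.hh"

    namespace Rivet {

      /// Hypothetical demo analysis, not part of the Rivet distribution.
      class LOSSY_DEMO : public Analysis {
      public:

        DEFAULT_RIVET_ANALYSIS_CTOR(LOSSY_DEMO);

        void init() {
          // Keep each particle with 90% probability, independent of kinematics.
          declare(ConstLossyFinalState(0.1, Cuts::abseta < 2.5), "LossyFS");
        }

        void analyze(const Event& event) {
          const Particles& parts = apply<ConstLossyFinalState>(event, "LossyFS").particles();
          MSG_DEBUG("Particles surviving random 10% loss: " << parts.size());
        }

      };

      DECLARE_RIVET_PLUGIN(LOSSY_DEMO);

    }

Because the loss is decided per particle with rand01(), results are reproducible only up to the generator run's random state; that is the intended behaviour for a toy detector-inefficiency model.
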