diff --git a/analyses/pluginBES/BESIII_2018_I1699641.cc b/analyses/pluginBES/BESIII_2018_I1699641.cc --- a/analyses/pluginBES/BESIII_2018_I1699641.cc +++ b/analyses/pluginBES/BESIII_2018_I1699641.cc @@ -1,139 +1,140 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class BESIII_2018_I1699641 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BESIII_2018_I1699641); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); // Book histograms book(_cKKpipi, "TMP/2Kpipi" ); book(_cKKpieta, "TMP/2Kpieta"); } void findChildren(const Particle & p,map & nRes, int &ncount) { for (const Particle &child : p.children()) { if(child.children().empty()) { nRes[child.pid()]-=1; --ncount; } else findChildren(child,nRes,ncount); } } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); for (const Particle& p : fs.particles()) { nCount[p.pid()] += 1; ++ntotal; } // K K pi pi if(ntotal==4 && nCount[310]==1 && nCount[111]==1 && ((nCount[ 321]==1 &&nCount[-211]==1) || (nCount[-321]==1 &&nCount[ 211]==1) )) _cKKpipi->fill(); // eta resonance const FinalState& ufs = apply(event, "UFS"); for (const Particle& p : ufs.particles()) { if(p.children().empty()) continue; if(p.pid()!=221) continue; map nRes=nCount; int ncount = ntotal; findChildren(p,nRes,ncount); if(ncount!=3) continue; bool matched=true; for(auto const & val : nRes) { if(abs(val.first)==321 || abs(val.first)==211) { continue; } else if(abs(val.first)==310) { if(val.second!=1) { matched = false; break; } } else if(val.second!=0) { matched = false; break; } } if(matched==false) 
continue; if((nCount[ 321] == 1 && nCount[-211] ==1) || (nCount[-321] == 1 && nCount[ 211] ==1)) _cKKpieta->fill(); } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=1;ix<3;++ix) { double sigma = 0., error = 0.; if(ix==1) { sigma = _cKKpipi->val(); error = _cKKpipi->err(); } else if(ix==2) { sigma = _cKKpieta->val(); error = _cKKpieta->err(); } sigma *= crossSection()/ sumOfWeights() /picobarn; error *= crossSection()/ sumOfWeights() /picobarn; Scatter2D temphisto(refData(ix, 1, 1)); Scatter2DPtr mult; book(mult, ix, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _cKKpipi,_cKKpieta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BESIII_2018_I1699641); } diff --git a/analyses/pluginCESR/CLEOC_2005_I693873.cc b/analyses/pluginCESR/CLEOC_2005_I693873.cc --- a/analyses/pluginCESR/CLEOC_2005_I693873.cc +++ b/analyses/pluginCESR/CLEOC_2005_I693873.cc @@ -1,108 +1,109 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEOC_2005_I693873 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEOC_2005_I693873); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _npipi = bookCounter("TMP/npipi"); - _nKK = bookCounter("TMP/nKK"); - _nppbar = bookCounter("TMP/nppbar"); + book(_npipi, "TMP/npipi"); + book(_nKK, "TMP/nKK"); + book(_nppbar, "TMP/nppbar"); } /// 
Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } if(ntotal!=2) vetoEvent; if(nCount[211]==1 && nCount[-211]==1) - _npipi->fill(event.weight()); + _npipi->fill(); else if(nCount[321]==1 && nCount[-321]==1) - _nKK->fill(event.weight()); + _nKK->fill(); else if(nCount[2212]==1 && nCount[-2212]==1) - _nppbar->fill(event.weight()); + _nppbar->fill(); } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=1;ix<4;++ix) { double sigma = 0., error = 0.; if(ix==1) { sigma = _npipi->val(); error = _npipi->err(); } else if(ix==2) { sigma = _nKK->val(); error = _nKK->err(); } else if(ix==3) { sigma = _nppbar->val(); error = _nppbar->err(); } sigma *= crossSection()/ sumOfWeights() /picobarn; error *= crossSection()/ sumOfWeights() /picobarn; Scatter2D temphisto(refData(1, 1, ix)); - Scatter2DPtr mult = bookScatter2D(1, 1, ix); + Scatter2DPtr mult; + book(mult, 1, 1, ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _npipi,_nKK,_nppbar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEOC_2005_I693873); } diff --git a/analyses/pluginCESR/CLEOC_2008_I777917.cc b/analyses/pluginCESR/CLEOC_2008_I777917.cc --- a/analyses/pluginCESR/CLEOC_2008_I777917.cc +++ b/analyses/pluginCESR/CLEOC_2008_I777917.cc @@ -1,363 +1,368 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEOC_2008_I777917 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEOC_2008_I777917); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); - _c_D0D0 = bookCounter("/TMP/sigma_D0D0"); - _c_DpDm = bookCounter("/TMP/sigma_DpDm"); - _c_DsDs = bookCounter("/TMP/sigma_DsDs"); - _c_D0D0S = bookCounter("/TMP/sigma_D0D0S"); - _c_DpDmS = bookCounter("/TMP/sigma_DpDmS"); - _c_DsDsS = bookCounter("/TMP/sigma_DsDsS"); - _c_D0SD0S = bookCounter("/TMP/sigma_D0SD0S"); - _c_DpSDmS = bookCounter("/TMP/sigma_DpSDmS"); - _c_DsSDsS = bookCounter("/TMP/sigma_DsSDsS"); - _c_DD = bookCounter("/TMP/sigma_DD"); - _c_DDX = bookCounter("/TMP/sigma_DDX"); - _c_DSDpi = bookCounter("/TMP/sigma_DSDpi"); - _c_DSDSpi = bookCounter("/TMP/sigma_DSDSpi"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); + book(_c_D0D0, "/TMP/sigma_D0D0"); + book(_c_DpDm, "/TMP/sigma_DpDm"); + book(_c_DsDs, "/TMP/sigma_DsDs"); + book(_c_D0D0S, 
"/TMP/sigma_D0D0S"); + book(_c_DpDmS, "/TMP/sigma_DpDmS"); + book(_c_DsDsS, "/TMP/sigma_DsDsS"); + book(_c_D0SD0S, "/TMP/sigma_D0SD0S"); + book(_c_DpSDmS, "/TMP/sigma_DpSDmS"); + book(_c_DsSDsS, "/TMP/sigma_DsSDsS"); + book(_c_DD, "/TMP/sigma_DD"); + book(_c_DDX, "/TMP/sigma_DDX"); + book(_c_DSDpi, "/TMP/sigma_DSDpi"); + book(_c_DSDSpi, "/TMP/sigma_DSDSpi"); } void findChildren(const Particle & p,map & nRes, int &ncount) { - foreach(const Particle &child, p.children()) { + for (const Particle &child : p.children()) { if(child.children().empty()) { - nRes[child.pdgId()]-=1; + nRes[child.pid()]-=1; --ncount; } else findChildren(child,nRes,ncount); } } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); // total hadronic and muonic cross sections map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); // identified final state with D mesons const FinalState& ufs = apply(event, "UFS"); for(unsigned int ix=0;ix nRes = nCount; int ncount = ntotal; findChildren(p1,nRes,ncount); bool matched=false; - int sign = p1.pdgId()/id1; + int sign = p1.pid()/id1; // loop over the other fs particles for(unsigned int iy=ix+1;iyfill(event.weight()); + _c_DDX->fill(); map nRes2 = nRes; int ncount2 = ncount; findChildren(p2,nRes2,ncount2); if(ncount2==0) { matched=true; for(auto const & val : nRes2) { if(val.second!=0) { matched = false; break; } } if(matched) { if(id1==411 && id2==411) { - _c_DpDm->fill(event.weight()); - _c_DD ->fill(event.weight()); + _c_DpDm->fill(); + _c_DD ->fill(); } else if(id1==421&& id2==421) { - _c_D0D0->fill(event.weight()); - _c_DD ->fill(event.weight()); + 
_c_D0D0->fill(); + _c_DD ->fill(); } else if(id1==431&& id2==431) { - _c_DsDs->fill(event.weight()); + _c_DsDs->fill(); } else if(id1==413 && id2==413) { - _c_DpSDmS->fill(event.weight()); + _c_DpSDmS->fill(); } else if(id1==423&& id2==423) { - _c_D0SD0S->fill(event.weight()); + _c_D0SD0S->fill(); } else if(id1==433&& id2==433) { - _c_DsSDsS->fill(event.weight()); + _c_DsSDsS->fill(); } else if((id1==421 && id2==423) || (id1==423 && id2==421)) { - _c_D0D0S->fill(event.weight()); + _c_D0D0S->fill(); } else if((id1==411 && id2==413) || (id1==413 && id2==411)) { - _c_DpDmS->fill(event.weight()); + _c_DpDmS->fill(); } else if((id1==431 && id2==433) || (id1==433 && id2==431)) { - _c_DsDsS->fill(event.weight()); + _c_DsDsS->fill(); } } } else if(ncount2==1) { int ipi=0; if(nRes2[111]==1 && nRes2[211]==0 && nRes[-211]==0 ) ipi = 111; else if(nRes2[111]==0 && nRes2[211]==1 && nRes[-211]==0 ) ipi = 211; else if(nRes2[111]==0 && nRes2[211]==0 && nRes[-211]==1 ) ipi =-211; if(ipi==0) continue; matched=true; for(auto const & val : nRes2) { if(val.first==ipi) continue; else if(val.second!=0) { matched = false; break; } } if(matched) { bool Ddecay = false; Particle mother = p1; while (!mother.parents().empty()) { mother = mother.parents()[0]; - if(PID::isCharmMeson(mother.pdgId()) && mother.pdgId()!=p1.pdgId()) { + if(PID::isCharmMeson(mother.pid()) && mother.pid()!=p1.pid()) { Ddecay = true; break; } } mother = p2; while (!mother.parents().empty()) { mother = mother.parents()[0]; - if(PID::isCharmMeson(mother.pdgId()) && mother.pdgId()!=p1.pdgId()) { + if(PID::isCharmMeson(mother.pid()) && mother.pid()!=p1.pid()) { Ddecay = true; break; } } if(Ddecay) continue; if((id1==413 || id1==423 ) && (id2==413 || id2==423 )) { - _c_DSDSpi->fill(event.weight()); + _c_DSDSpi->fill(); } else if((id1==411 || id1==421 ) && (id2==413 || id2==423 )) { - _c_DSDpi->fill(event.weight()); + _c_DSDpi->fill(); } else if((id1==413 || id1==423 ) && (id2==411 || id2==421 )) { - 
_c_DSDpi->fill(event.weight()); + _c_DSDpi->fill(); } } } } } } /// Normalise histograms etc., after the run void finalize() { // R Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_c = _c_DDX->val()*fact; double err_c = _c_DDX->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(6, 1, 2)); - Scatter2DPtr charm = bookScatter2D(6,1,1); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(6,1,2); + Scatter2DPtr charm; + book(charm, 6,1,1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 6,1,2); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/MeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); charm ->addPoint(x, sig_c, ex, make_pair(err_c,err_c)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); charm ->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } for(unsigned int ix=1;ix<6;++ix) { unsigned int imax(0); if (ix<=3) imax = 4; else imax = 3; for(unsigned int iy=1;iyval()/picobarn; error = _c_D0D0->err()/picobarn; } else if(iy==2) { sigma = _c_D0D0S->val()/picobarn; error = _c_D0D0S->err()/picobarn; } else if(iy==3) { sigma = _c_D0SD0S->val()/picobarn; error = _c_D0SD0S->err()/picobarn; } } else if(ix==2) { if(iy==1) { sigma = _c_DpDm->val()/picobarn; error = _c_DpDm->err()/picobarn; } else if(iy==2) { sigma = _c_DpDmS->val()/picobarn; error = _c_DpDmS->err()/picobarn; } else if(iy==3) { sigma = _c_DpSDmS->val()/picobarn; error = _c_DpSDmS->err()/picobarn; } } else if(ix==3) { if(iy==1) { sigma = _c_DsDs->val()/picobarn; error = _c_DsDs->err()/picobarn; } else if(iy==2) { sigma = _c_DsDsS->val()/picobarn; error = _c_DsDsS->err()/picobarn; } else if(iy==3) { sigma = _c_DsSDsS->val()/picobarn; error = _c_DsSDsS->err()/picobarn; } } else if(ix==4) { if(iy==1) { sigma = _c_DSDpi->val()/picobarn; error = _c_DSDpi->err()/picobarn; } else if(iy==2) { sigma = _c_DSDSpi->val()/picobarn; error = _c_DSDSpi->err()/picobarn; } } else if(ix==5) { if(iy==1) { sigma = _c_DD->val()/nanobarn; error = _c_DD->err()/nanobarn; } else if(iy==2) { sigma = _c_DDX->val()/nanobarn; error = _c_DDX->err()/nanobarn; } } sigma *= crossSection()/ sumOfWeights(); error *= crossSection()/ sumOfWeights(); Scatter2D temphisto(refData(ix, 1, iy)); - Scatter2DPtr mult = bookScatter2D(ix,1,iy); + Scatter2DPtr mult; + book(mult, ix,1,iy); for (size_t b = 0; b < 
temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/MeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } } //@} /// @name Histograms //@{ CounterPtr _c_D0D0, _c_DpDm,_c_DsDs; CounterPtr _c_D0D0S, _c_DpDmS,_c_DsDsS; CounterPtr _c_D0SD0S, _c_DpSDmS,_c_DsSDsS; CounterPtr _c_DD, _c_DDX; CounterPtr _c_DSDpi, _c_DSDSpi; CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEOC_2008_I777917); } diff --git a/analyses/pluginCESR/CLEO_1983_I188803.cc b/analyses/pluginCESR/CLEO_1983_I188803.cc --- a/analyses/pluginCESR/CLEO_1983_I188803.cc +++ b/analyses/pluginCESR/CLEO_1983_I188803.cc @@ -1,87 +1,88 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1983_I188803 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1983_I188803); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) vetoEvent; // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void 
finalize() { double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr mult = bookScatter2D(2, 1, 1); + Scatter2DPtr mult; + book(mult, 2, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1983_I188803); } diff --git a/analyses/pluginCESR/CLEO_1983_I188805.cc b/analyses/pluginCESR/CLEO_1983_I188805.cc --- a/analyses/pluginCESR/CLEO_1983_I188805.cc +++ b/analyses/pluginCESR/CLEO_1983_I188805.cc @@ -1,86 +1,87 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1983_I188805 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1983_I188805); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) vetoEvent; // everything else else - _c_hadrons->fill(event.weight()); + 
_c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { // R double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1,1,1); + Scatter2DPtr hadrons; + book(hadrons, 1,1,1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); } else { hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1983_I188805); } diff --git a/analyses/pluginCESR/CLEO_1984_I193577.cc b/analyses/pluginCESR/CLEO_1984_I193577.cc --- a/analyses/pluginCESR/CLEO_1984_I193577.cc +++ b/analyses/pluginCESR/CLEO_1984_I193577.cc @@ -1,98 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1984_I193577 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1984_I193577); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : 
fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1984_I193577); } diff --git a/analyses/pluginCESR/CLEO_1991_I29927.cc b/analyses/pluginCESR/CLEO_1991_I29927.cc --- a/analyses/pluginCESR/CLEO_1991_I29927.cc +++ b/analyses/pluginCESR/CLEO_1991_I29927.cc @@ -1,99 +1,100 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class CLEO_1991_I29927 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1991_I29927); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); // Book histograms - _c_B = bookCounter("/TMP/sigma_B"); - _c_Bstar = bookCounter("/TMP/sigma_Bstar"); + book(_c_B, "/TMP/sigma_B"); + book(_c_Bstar, "/TMP/sigma_Bstar"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& ufs = apply(event, "UFS"); unsigned int nBstar(0); // Get Bottom hadrons const Particles bhads = filter_select(ufs.particles(), isBottomHadron); // find the Bstars for (const Particle& p : bhads) { - if(abs(p.pdgId())==513 || abs(p.pdgId())==523) { - if(!p.hasDescendantWith(Cuts::pid == p.pdgId())) ++nBstar; + if(abs(p.pid())==513 || abs(p.pid())==523) { + if(!p.hasDescendantWith(Cuts::pid == p.pid())) ++nBstar; } } if(!bhads.empty()) - _c_B->fill(event.weight()); + _c_B->fill(); if(nBstar!=0) - 
_c_Bstar->fill(nBstar*event.weight()); + _c_Bstar->fill(nBstar); } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /picobarn; for(unsigned int ix=1;ix<3;++ix) { double sig(0.),err(0.); if(ix==1) { sig = _c_B->val()*fact; err = _c_B->err()*fact; } else { sig = _c_Bstar->val()*fact; err = _c_Bstar->err()*fact; } Scatter2D temphisto(refData(ix, 1, 1)); - Scatter2DPtr mult = bookScatter2D(ix, 1, 1); + Scatter2DPtr mult; + book(mult, ix, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sig, ex, make_pair(err,err)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _c_B, _c_Bstar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1991_I29927); } diff --git a/analyses/pluginCESR/CLEO_1998_I445351.cc b/analyses/pluginCESR/CLEO_1998_I445351.cc --- a/analyses/pluginCESR/CLEO_1998_I445351.cc +++ b/analyses/pluginCESR/CLEO_1998_I445351.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1998_I445351 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1998_I445351); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); 
map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1998_I445351); } diff --git a/analyses/pluginCESR/CLEO_1999_I474676.cc b/analyses/pluginCESR/CLEO_1999_I474676.cc --- a/analyses/pluginCESR/CLEO_1999_I474676.cc +++ b/analyses/pluginCESR/CLEO_1999_I474676.cc @@ -1,124 +1,125 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1999_I474676 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1999_I474676); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); - _nUps2pipi = bookCounter("TMP/nUps2pipi"); - _nUps3pipi = bookCounter("TMP/nUps3pipi"); + book(_nUps2pipi, "TMP/nUps2pipi"); + book(_nUps3pipi, "TMP/nUps3pipi"); } void findChildren(const Particle & p,map & nRes, int &ncount) { - foreach(const Particle &child, p.children()) { + for (const Particle &child : p.children()) { if(child.children().empty()) { - --nRes[child.pdgId()]; + --nRes[child.pid()]; --ncount; } else findChildren(child,nRes,ncount); } } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } const 
FinalState& ufs = apply(event, "UFS"); - foreach (const Particle& p, ufs.particles()) { + for (const Particle& p : ufs.particles()) { if(p.children().empty()) continue; - if(p.pdgId() != 100553 && - p.pdgId() != 200553 ) continue; + if(p.pid() != 100553 && + p.pid() != 200553 ) continue; map nRes = nCount; int ncount = ntotal; findChildren(p,nRes,ncount); if(ncount!=2) continue; bool matched = true; for(auto const & val : nRes) { if(abs(val.first)==211) { if(val.second!=1) { matched = false; break; } } else if(val.second!=0) { matched = false; break; } } if(matched) { - if(p.pdgId()==100553) - _nUps2pipi->fill(event.weight()); - if(p.pdgId()==200553) - _nUps3pipi->fill(event.weight()); + if(p.pid()==100553) + _nUps2pipi->fill(); + if(p.pid()==200553) + _nUps3pipi->fill(); } } } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /picobarn; for(unsigned int ix=1;ix<3;++ix) { double sigma,error; if(ix==1) { sigma = _nUps3pipi->val()*fact; error = _nUps3pipi->err()*fact; } else if(ix==2) { sigma = _nUps2pipi->val()*fact; error = _nUps2pipi->err()*fact; } Scatter2D temphisto(refData(1, 1, ix)); - Scatter2DPtr mult = bookScatter2D(1, 1, ix); + Scatter2DPtr mult; + book(mult, 1, 1, ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _nUps2pipi,_nUps3pipi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1999_I474676); } diff --git a/analyses/pluginCESR/CLEO_1999_I508944.cc b/analyses/pluginCESR/CLEO_1999_I508944.cc --- a/analyses/pluginCESR/CLEO_1999_I508944.cc +++ b/analyses/pluginCESR/CLEO_1999_I508944.cc @@ -1,101 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_1999_I508944 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_1999_I508944); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(UnstableParticles(), "UFS"); - _hist_pipi = bookHisto1D( 1, 1, 1); + book(_hist_pipi, 1, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Find the taus Particles taus; const UnstableParticles& ufs = apply(event, "UFS"); - foreach (const Particle& p, ufs.particles()) { + for (const Particle& p : ufs.particles()) { if (p.abspid() != PID::TAU) continue; Particles pip, pim, pi0; unsigned int nstable = 0; // get the boost to the rest frame // find the decay products we want findDecayProducts(p, nstable, pip, pim, pi0); if (p.pid() < 0) { swap(pip, pim); } if (nstable != 3) continue; // pipi if (pim.size() == 1 && pi0.size() == 1) - _hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass(),event.weight()); + _hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass()); } } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& pip, Particles& pim, Particles& pi0) { for(const Particle & p : mother.children()) { - long id = p.pdgId(); + long id = p.pid(); if (id == PID::PI0 ) { 
pi0.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::K0S || id == PID::KPLUS || id == PID::KMINUS) { ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p, nstable, pip, pim, pi0); } else ++nstable; } } /// Normalise histograms etc., after the run void finalize() { normalize(_hist_pipi); } //@} /// @name Histograms //@{ Histo1DPtr _hist_pipi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_1999_I508944); } diff --git a/analyses/pluginCESR/CLEO_2006_I691720.cc b/analyses/pluginCESR/CLEO_2006_I691720.cc --- a/analyses/pluginCESR/CLEO_2006_I691720.cc +++ b/analyses/pluginCESR/CLEO_2006_I691720.cc @@ -1,223 +1,224 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_2006_I691720 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_2006_I691720); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); // Book histograms for(unsigned int ix=1;ix<18;++ix) { stringstream ss; ss << "TMP/n" << ix; - _nMeson[ix]= bookCounter(ss.str()); + book(_nMeson[ix], ss.str()); } } void findChildren(const Particle & p,map & nRes, int &ncount) { - foreach(const Particle &child, p.children()) { + for (const Particle &child : p.children()) { if(child.children().empty()) { - nRes[child.pdgId()]-=1; + nRes[child.pid()]-=1; --ncount; } else findChildren(child,nRes,ncount); } } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const 
Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } if(ntotal==3) { if(nCount[211]==1 && nCount[-211]==1 && nCount[111]==1) - _nMeson[1]->fill(event.weight()); + _nMeson[1]->fill(); } const FinalState& ufs = apply(event, "UFS"); for(unsigned int ix=0;ix nRes = nCount; int ncount = ntotal; findChildren(p1,nRes,ncount); - if(p1.pdgId()==113 || p1.pdgId()==223 || p1.pdgId()==10113) { + if(p1.pid()==113 || p1.pid()==223 || p1.pid()==10113) { if(ncount!=1) continue; bool matched = true; for(auto const & val : nRes) { if(abs(val.first)==111 && val.second!=1) { matched = false; break; } else if(val.second!=0) { matched = false; break; } } if(matched) { - if(p1.pdgId()==113) { - _nMeson[2]->fill(event.weight()); - _nMeson[3]->fill(event.weight()); + if(p1.pid()==113) { + _nMeson[2]->fill(); + _nMeson[3]->fill(); } - else if(p1.pdgId()==223) { - _nMeson[5]->fill(event.weight()); + else if(p1.pid()==223) { + _nMeson[5]->fill(); } - else if(p1.pdgId()==10113) { - _nMeson[15]->fill(event.weight()); + else if(p1.pid()==10113) { + _nMeson[15]->fill(); } } } - else if(p1.pdgId()==abs(113) || abs(p1.pdgId())==10113) { + else if(p1.pid()==abs(113) || abs(p1.pid())==10113) { if(ncount!=1) continue; bool matched = true; - int ipi = p1.pdgId()>0 ? -211 : 211; + int ipi = p1.pid()>0 ? -211 : 211; for(auto const & val : nRes) { if(abs(val.first)==ipi && val.second!=1) { matched = false; break; } else if(val.second!=0) { matched = false; break; } } if(matched) { - if(p1.pdgId()==abs(113)) { - _nMeson[2]->fill(event.weight()); - _nMeson[4]->fill(event.weight()); + if(p1.pid()==abs(113)) { + _nMeson[2]->fill(); + _nMeson[4]->fill(); } else { - _nMeson[15]->fill(event.weight()); - _nMeson[17]->fill(event.weight()); + _nMeson[15]->fill(); + _nMeson[17]->fill(); } } } - else if(p1.pdgId()==abs(323)) { + else if(p1.pid()==abs(323)) { if(ncount!=1) continue; bool matched = true; - int iK = p1.pdgId()==323 ? -321 : 321; + int iK = p1.pid()==323 ? 
-321 : 321; for(auto const & val : nRes) { if(abs(val.first)==iK && val.second!=1) { matched = false; break; } else if(val.second!=0) { matched = false; break; } } if(matched) { - _nMeson[14]->fill(event.weight()); + _nMeson[14]->fill(); } } // second unstable particle - if(abs(p1.pdgId())!=313 && p1.pdgId()!=113 && p1.pdgId()!=223 && - p1.pdgId()!=333 && p1.pdgId()!=221 && p1.pdgId()!=331) + if(abs(p1.pid())!=313 && p1.pid()!=113 && p1.pid()!=223 && + p1.pid()!=333 && p1.pid()!=221 && p1.pid()!=331) continue; for(unsigned int iy=ix+1;iy nRes2 = nRes; int ncount2 = ncount; findChildren(p2,nRes2,ncount2); if(ncount!=0) continue; bool matched=true; for(auto const & val : nRes2) { if(val.second!=0) { matched = false; break; } } if(!matched) continue; - if( (p1.pdgId()==113 && p2.pdgId()==221) || - (p2.pdgId()==113 && p1.pdgId()==221) ) - _nMeson[7]->fill(event.weight()); - else if( (p1.pdgId()==223 && p2.pdgId()==221) || - (p2.pdgId()==223 && p1.pdgId()==221) ) - _nMeson[8]->fill(event.weight()); - else if( (p1.pdgId()==333 && p2.pdgId()==221) || - (p2.pdgId()==333 && p1.pdgId()==221) ) - _nMeson[9]->fill(event.weight()); - else if( (p1.pdgId()==113 && p2.pdgId()==331) || - (p2.pdgId()==113 && p1.pdgId()==331) ) - _nMeson[10]->fill(event.weight()); - else if( (p1.pdgId()==223 && p2.pdgId()==331) || - (p2.pdgId()==223 && p1.pdgId()==331) ) - _nMeson[11]->fill(event.weight()); - else if( (p1.pdgId()==333 && p2.pdgId()==331) || - (p2.pdgId()==333 && p1.pdgId()==331) ) - _nMeson[12]->fill(event.weight()); - else if( (p1.pdgId()==313 && p2.pdgId()==-313) || - (p2.pdgId()==313 && p1.pdgId()==-313) ) - _nMeson[13]->fill(event.weight()); + if( (p1.pid()==113 && p2.pid()==221) || + (p2.pid()==113 && p1.pid()==221) ) + _nMeson[7]->fill(); + else if( (p1.pid()==223 && p2.pid()==221) || + (p2.pid()==223 && p1.pid()==221) ) + _nMeson[8]->fill(); + else if( (p1.pid()==333 && p2.pid()==221) || + (p2.pid()==333 && p1.pid()==221) ) + _nMeson[9]->fill(); + else if( (p1.pid()==113 && 
p2.pid()==331) || + (p2.pid()==113 && p1.pid()==331) ) + _nMeson[10]->fill(); + else if( (p1.pid()==223 && p2.pid()==331) || + (p2.pid()==223 && p1.pid()==331) ) + _nMeson[11]->fill(); + else if( (p1.pid()==333 && p2.pid()==331) || + (p2.pid()==333 && p1.pid()==331) ) + _nMeson[12]->fill(); + else if( (p1.pid()==313 && p2.pid()==-313) || + (p2.pid()==313 && p1.pid()==-313) ) + _nMeson[13]->fill(); } } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=1;ix<18;++ix) { if(ix==6||ix==16) continue; double sigma = _nMeson[ix]->val(); double error = _nMeson[ix]->err(); sigma *= crossSection()/ sumOfWeights() /picobarn; error *= crossSection()/ sumOfWeights() /picobarn; Scatter2D temphisto(refData(1, 1, ix)); - Scatter2DPtr mult = bookScatter2D(1, 1, ix); + Scatter2DPtr mult; + book(mult, 1, 1, ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _nMeson[18]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_2006_I691720); } diff --git a/analyses/pluginCESR/CLEO_2006_I700665.cc b/analyses/pluginCESR/CLEO_2006_I700665.cc --- a/analyses/pluginCESR/CLEO_2006_I700665.cc +++ b/analyses/pluginCESR/CLEO_2006_I700665.cc @@ -1,87 +1,88 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_2006_I700665 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_2006_I700665); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(FinalState(), "FS"); - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); unsigned int nCharged(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; - if(PID::isCharged(p.pdgId())) ++nCharged; + if(PID::isCharged(p.pid())) ++nCharged; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) vetoEvent; // everything else else { - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, 1, 1, 1); for (size_t b = 0; b < 
temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); } else { hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_2006_I700665); } diff --git a/analyses/pluginCESR/CLEO_2007_I753556.cc b/analyses/pluginCESR/CLEO_2007_I753556.cc --- a/analyses/pluginCESR/CLEO_2007_I753556.cc +++ b/analyses/pluginCESR/CLEO_2007_I753556.cc @@ -1,101 +1,104 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CLEO_2007_I753556 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CLEO_2007_I753556); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = 
R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CLEO_2007_I753556); } diff --git a/analyses/pluginCESR/CUSB_1982_I180613.cc b/analyses/pluginCESR/CUSB_1982_I180613.cc --- a/analyses/pluginCESR/CUSB_1982_I180613.cc +++ b/analyses/pluginCESR/CUSB_1982_I180613.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CUSB_1982_I180613 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CUSB_1982_I180613); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // 
Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(3, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(3, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 3, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CUSB_1982_I180613); } diff --git a/analyses/pluginCMS/CMS_2013_I1256943.cc b/analyses/pluginCMS/CMS_2013_I1256943.cc --- a/analyses/pluginCMS/CMS_2013_I1256943.cc +++ b/analyses/pluginCMS/CMS_2013_I1256943.cc @@ -1,182 +1,182 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// CMS cross-section and angular correlations in Z boson + b-hadrons events at 7 TeV class CMS_2013_I1256943 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1256943); /// Add projections and book histograms void init() { book(_sumW, "sumW"); book(_sumW50, "sumW50"); book(_sumWpT, "sumWpT"); FinalState fs(Cuts::abseta < 2.4 && Cuts::pT > 20*GeV); declare(fs, "FS"); UnstableParticles ufs(Cuts::abseta < 2 && Cuts::pT > 15*GeV); declare(ufs, "UFS"); Cut zetacut = Cuts::abseta < 2.4; ZFinder zfindermu(fs, zetacut, PID::MUON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES, 91.2*GeV); declare(zfindermu, "ZFinderMu"); ZFinder zfinderel(fs, zetacut, PID::ELECTRON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::ClusterPhotons::NONE, ZFinder::AddPhotons::YES, 91.2*GeV); declare(zfinderel, "ZFinderEl"); // Histograms in non-boosted region of Z pT book(_h_dR_BB ,1, 1, 1); book(_h_dphi_BB ,2, 1, 1); book(_h_min_dR_ZB ,3, 1, 1); book(_h_A_ZBB ,4, 1, 1); // Histograms in boosted region of Z pT (pT > 50 GeV) 
book(_h_dR_BB_boost ,5, 1, 1); book(_h_dphi_BB_boost ,6, 1, 1); book(_h_min_dR_ZB_boost ,7, 1, 1); book(_h_A_ZBB_boost ,8, 1, 1); book(_h_min_ZpT ,9,1,1); } /// Do the analysis void analyze(const Event& e) { const UnstableParticles& ufs = apply(e, "UFS"); const ZFinder& zfindermu = apply(e, "ZFinderMu"); const ZFinder& zfinderel = apply(e, "ZFinderEl"); // Look for a Z --> mu+ mu- event in the final state if (zfindermu.empty() && zfinderel.empty()) vetoEvent; const Particles& z = !zfindermu.empty() ? zfindermu.bosons() : zfinderel.bosons(); const bool is_boosted = ( z[0].pT() > 50*GeV ); // Loop over the unstable particles vector Bmom; for (const Particle& p : ufs.particles()) { const PdgId pid = p.pid(); // Look for particles with a bottom quark if (PID::hasBottom(pid)) { bool good_B = false; - const GenParticle* pgen = p.genParticle(); - const GenVertex* vgen = pgen -> end_vertex(); + ConstGenParticlePtr pgen = p.genParticle(); + ConstGenVertexPtr vgen = pgen -> end_vertex(); // Loop over the decay products of each unstable particle, looking for a b-hadron pair /// @todo Avoid HepMC API for (ConstGenParticlePtr it: HepMCUtils::particles(vgen, Relatives::CHILDREN)){ // If the particle produced has a bottom quark do not count it and go to the next loop cycle. if (!( PID::hasBottom( it->pdg_id() ) ) ) { good_B = true; continue; } else { good_B = false; break; } } if (good_B ) Bmom.push_back( p.momentum() ); } else continue; } // If there are more than two B's in the final state veto the event if (Bmom.size() != 2 ) vetoEvent; // Calculate the observables double dphiBB = deltaPhi(Bmom[0], Bmom[1]); double dRBB = deltaR(Bmom[0], Bmom[1]); const FourMomentum& pZ = z[0].momentum(); const bool closest_B = ( deltaR(pZ, Bmom[0]) < deltaR(pZ, Bmom[1]) ); const double mindR_ZB = closest_B ? deltaR(pZ, Bmom[0]) : deltaR(pZ, Bmom[1]); const double maxdR_ZB = closest_B ? 
deltaR(pZ, Bmom[1]) : deltaR(pZ, Bmom[0]); const double AZBB = ( maxdR_ZB - mindR_ZB ) / ( maxdR_ZB + mindR_ZB ); // Fill the histograms in the non-boosted region _h_dphi_BB->fill(dphiBB); _h_dR_BB->fill(dRBB); _h_min_dR_ZB->fill(mindR_ZB); _h_A_ZBB->fill(AZBB); _sumW->fill(); _sumWpT->fill(); // Fill the histograms in the boosted region if (is_boosted) { _sumW50->fill(); _h_dphi_BB_boost->fill(dphiBB); _h_dR_BB_boost->fill(dRBB); _h_min_dR_ZB_boost->fill(mindR_ZB); _h_A_ZBB_boost->fill(AZBB); } // Fill Z pT (cumulative) histogram _h_min_ZpT->fill(0); if (pZ.pT() > 40*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(40); } if (pZ.pT() > 80*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(80); } if (pZ.pT() > 120*GeV ) { _sumWpT->fill(); _h_min_ZpT->fill(120); } Bmom.clear(); } /// Finalize void finalize() { // Normalize excluding overflow bins (d'oh) normalize(_h_dR_BB, 0.7*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d01-x01-y01 normalize(_h_dphi_BB, 0.53*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d02-x01-y01 normalize(_h_min_dR_ZB, 0.84*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d03-x01-y01 normalize(_h_A_ZBB, 0.2*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d04-x01-y01 normalize(_h_dR_BB_boost, 0.84*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d05-x01-y01 normalize(_h_dphi_BB_boost, 0.63*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d06-x01-y01 normalize(_h_min_dR_ZB_boost, 1*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d07-x01-y01 normalize(_h_A_ZBB_boost, 0.25*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d08-x01-y01 normalize(_h_min_ZpT, 40*crossSection()*dbl(*_sumWpT)/sumOfWeights(), false); // d09-x01-y01 } private: /// @name Weight counters //@{ CounterPtr _sumW, _sumW50, _sumWpT; //@} /// @name Histograms //@{ Histo1DPtr _h_dphi_BB, _h_dR_BB, _h_min_dR_ZB, _h_A_ZBB; Histo1DPtr _h_dphi_BB_boost, _h_dR_BB_boost, _h_min_dR_ZB_boost, _h_A_ZBB_boost, _h_min_ZpT; //@} }; 
DECLARE_RIVET_PLUGIN(CMS_2013_I1256943); } diff --git a/analyses/pluginCMS/CMS_2016_I1486238.cc b/analyses/pluginCMS/CMS_2016_I1486238.cc --- a/analyses/pluginCMS/CMS_2016_I1486238.cc +++ b/analyses/pluginCMS/CMS_2016_I1486238.cc @@ -1,126 +1,125 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" -#include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/FastJets.hh" #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// Studies of 2 b-jet + 2 jet production in proton-proton collisions at 7 TeV class CMS_2016_I1486238 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1486238); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FastJets akt(FinalState(), FastJets::ANTIKT, 0.5); declare(akt, "antikT"); book(_h_Deltaphi_newway, 1,1,1); book(_h_deltaphiafterlight, 9,1,1); book(_h_SumPLight, 5,1,1); book(_h_LeadingBJetpt, 11,1,1); book(_h_SubleadingBJetpt, 15,1,1); book(_h_LeadingLightJetpt, 13,1,1); book(_h_SubleadingLightJetpt, 17,1,1); book(_h_LeadingBJeteta, 10,1,1); book(_h_SubleadingBJeteta, 14,1,1); book(_h_LeadingLightJeteta, 12,1,1); book(_h_SubleadingLightJeteta, 16,1,1); } /// Perform the per-event analysis void analyze(const Event& event) { const Jets& jets = apply(event, "antikT").jetsByPt(Cuts::absrap < 4.7 && Cuts::pT > 20*GeV); if (jets.size() < 4) vetoEvent; // Initial quarks /// @note Quark-level tagging... Particles bquarks; - for (const GenParticle* p : particles(event.genEvent())) { + for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { if (abs(p->pdg_id()) == PID::BQUARK) bquarks += Particle(p); } Jets bjets, ljets; for (const Jet& j : jets) { const bool btag = any(bquarks, deltaRLess(j, 0.3)); // for (const Particle& b : bquarks) if (deltaR(j, b) < 0.3) btag = true; (btag && j.abseta() < 2.4 ? 
bjets : ljets).push_back(j); } // Fill histograms const double weight = 1.0; if (bjets.size() >= 2 && ljets.size() >= 2) { _h_LeadingBJetpt->fill(bjets[0].pT()/GeV, weight); _h_SubleadingBJetpt->fill(bjets[1].pT()/GeV, weight); _h_LeadingLightJetpt->fill(ljets[0].pT()/GeV, weight); _h_SubleadingLightJetpt->fill(ljets[1].pT()/GeV, weight); // _h_LeadingBJeteta->fill(bjets[0].eta(), weight); _h_SubleadingBJeteta->fill(bjets[1].eta(), weight); _h_LeadingLightJeteta->fill(ljets[0].eta(), weight); _h_SubleadingLightJeteta->fill(ljets[1].eta(), weight); const double lightdphi = deltaPhi(ljets[0], ljets[1]); _h_deltaphiafterlight->fill(lightdphi, weight); const double vecsumlightjets = sqrt(sqr(ljets[0].px()+ljets[1].px()) + sqr(ljets[0].py()+ljets[1].py())); //< @todo Just (lj0+lj1).pT()? Or use add_quad const double term2 = vecsumlightjets/(sqrt(sqr(ljets[0].px()) + sqr(ljets[0].py())) + sqrt(sqr(ljets[1].px()) + sqr(ljets[1].py()))); //< @todo lj0.pT() + lj1.pT()? Or add_quad _h_SumPLight->fill(term2, weight); const double pxBsyst2 = bjets[0].px()+bjets[1].px(); // @todo (bj0+bj1).px() const double pyBsyst2 = bjets[0].py()+bjets[1].py(); // @todo (bj0+bj1).py() const double pxJetssyst2 = ljets[0].px()+ljets[1].px(); // @todo (lj0+lj1).px() const double pyJetssyst2 = ljets[0].py()+ljets[1].py(); // @todo (lj0+lj1).py() const double modulusB2 = sqrt(sqr(pxBsyst2)+sqr(pyBsyst2)); //< @todo add_quad const double modulusJets2 = sqrt(sqr(pxJetssyst2)+sqr(pyJetssyst2)); //< @todo add_quad const double cosphiBsyst2 = pxBsyst2/modulusB2; const double cosphiJetssyst2 = pxJetssyst2/modulusJets2; const double phiBsyst2 = ((pyBsyst2 > 0) ? 
1 : -1) * acos(cosphiBsyst2); //< @todo sign(pyBsyst2) const double phiJetssyst2 = sign(pyJetssyst2) * acos(cosphiJetssyst2); const double Dphi2 = deltaPhi(phiBsyst2, phiJetssyst2); _h_Deltaphi_newway->fill(Dphi2,weight); } } /// Normalise histograms etc., after the run void finalize() { const double invlumi = crossSection()/picobarn/sumOfWeights(); normalize({_h_SumPLight, _h_deltaphiafterlight, _h_Deltaphi_newway}); scale({_h_LeadingLightJetpt, _h_SubleadingLightJetpt, _h_LeadingBJetpt, _h_SubleadingBJetpt}, invlumi); scale({_h_LeadingLightJeteta, _h_SubleadingLightJeteta, _h_LeadingBJeteta, _h_SubleadingBJeteta}, invlumi); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_deltaphiafterlight, _h_Deltaphi_newway, _h_SumPLight; Histo1DPtr _h_LeadingBJetpt, _h_SubleadingBJetpt, _h_LeadingLightJetpt, _h_SubleadingLightJetpt; Histo1DPtr _h_LeadingBJeteta, _h_SubleadingBJeteta, _h_LeadingLightJeteta, _h_SubleadingLightJeteta; }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1486238); } diff --git a/analyses/pluginCMS/CMS_2016_I1487288.cc b/analyses/pluginCMS/CMS_2016_I1487288.cc --- a/analyses/pluginCMS/CMS_2016_I1487288.cc +++ b/analyses/pluginCMS/CMS_2016_I1487288.cc @@ -1,121 +1,121 @@ #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/WFinder.hh" namespace Rivet { /// @brief WZ production cross section in pp collisions at 7 and 8 TeV class CMS_2016_I1487288 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1487288); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { FinalState fs(Cuts::abseta < 4.9); - FastJets fj(fs, FastJets::ANTIKT, 0.5, JetAlg::ALL_MUONS, JetAlg::DECAY_INVISIBLES); + FastJets fj(fs, FastJets::ANTIKT, 0.5, JetAlg::Muons::ALL, JetAlg::Invisibles::DECAY); declare(fj, "Jets"); ZFinder zeeFinder(fs, Cuts::abseta < 
2.5 && Cuts::pT > 20*GeV, PID::ELECTRON, 71*GeV, 111*GeV); declare(zeeFinder, "Zee"); ZFinder zmumuFinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 71*GeV, 111*GeV); declare(zmumuFinder, "Zmumu"); WFinder weFinder(fs, Cuts::abseta < 2.5 && Cuts::pT > 20*GeV, PID::ELECTRON, 60*GeV, 100*GeV, 30*GeV); declare(weFinder, "We"); WFinder wmuFinder(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 60*GeV, 100*GeV, 30*GeV); declare(wmuFinder, "Wmu"); - _h_ZpT = bookHisto1D("d03-x01-y01"); - _h_Njet = bookHisto1D("d04-x01-y01", {-0.5, 0.5, 1.5, 2.5, 3.5}); ///< @todo Ref data has null bin widths - _h_JpT = bookHisto1D("d05-x01-y01"); + book(_h_ZpT, "d03-x01-y01"); + book(_h_Njet, "d04-x01-y01", {-0.5, 0.5, 1.5, 2.5, 3.5}); ///< @todo Ref data has null bin widths + book(_h_JpT, "d05-x01-y01"); } /// Perform the per-event analysis void analyze(const Event& event) { // Find Z -> l+ l- const ZFinder& zeeFS = apply(event, "Zee"); const ZFinder& zmumuFS = apply(event, "Zmumu"); const Particles zlls = zeeFS.bosons() + zmumuFS.bosons(); if (zlls.empty()) vetoEvent; // Next find the W const WFinder& weFS = apply(event, "We"); const WFinder& wmuFS = apply(event, "Wmu"); const Particles wls = weFS.bosons() + wmuFS.bosons(); if (wls.empty()) vetoEvent; // If more than one Z candidate, use the one with Mll nearest to MZ const Particles zlls_mz = sortBy(zlls, [](const Particle& a, const Particle& b){ return fabs(a.mass() - 91.2*GeV) < fabs(b.mass() - 91.2*GeV); }); const Particle& Z = zlls_mz.front(); // const bool isZee = any(Z.constituents(), hasAbsPID(PID::ELECTRON)); // If more than one Z candidate, use the one with Mll nearest to MZ const Particles wls_mw = sortBy(wls, [](const Particle& a, const Particle& b){ return fabs(a.mass() - 80.4*GeV) < fabs(b.mass() - 80.4*GeV); }); const Particle& W = wls_mw.front(); // const bool isWe = any(W.constituents(), hasAbsPID(PID::ELECTRON)); // Isolate W and Z charged leptons from each other for (const Particle& lw : 
W.constituents()) { if (lw.charge3() == 0) continue; for (const Particle& lz : Z.constituents()) { if (deltaR(lw, lz) < 0.1) vetoEvent; } } // Fill Z pT histogram - _h_ZpT->fill(Z.pT()/GeV, event.weight()); + _h_ZpT->fill(Z.pT()/GeV); // Isolate jets from W and Z charged leptons const Particles wzleps = filter_select(W.constituents()+Z.constituents(), isChLepton); const Jets& jets = apply("Jets", event).jetsByPt(Cuts::pT > 30*GeV and Cuts::abseta < 2.5); const Jets isojets = discardIfAnyDeltaRLess(jets, wzleps, 0.5); // Fill jet histograms - _h_Njet->fill(isojets.size(), event.weight()); - if (!isojets.empty()) _h_JpT->fill(isojets[0].pT()/GeV, event.weight()); + _h_Njet->fill(isojets.size()); + if (!isojets.empty()) _h_JpT->fill(isojets[0].pT()/GeV); } /// Normalise histograms etc., after the run void finalize() { // Total cross-section is corrected for BR(W->lnu), BR(Z->ll), leptonic-tau fraction f_tau = 6.5-7.6%, // and unpublished detector/acceptance signal efficiencies epsilon_sig. 
Fix to published value: valid for shape comparison only const double xsec8tev = 24.09; // picobarn; normalize(_h_ZpT, xsec8tev); normalize(_h_Njet, xsec8tev); normalize(_h_JpT, xsec8tev); } private: /// Histogram Histo1DPtr _h_ZpT, _h_Njet, _h_JpT; }; DECLARE_RIVET_PLUGIN(CMS_2016_I1487288); } diff --git a/analyses/pluginCMS/CMS_2016_I1491950.cc b/analyses/pluginCMS/CMS_2016_I1491950.cc --- a/analyses/pluginCMS/CMS_2016_I1491950.cc +++ b/analyses/pluginCMS/CMS_2016_I1491950.cc @@ -1,558 +1,488 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/PromptFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { class CMS_2016_I1491950 : public Analysis { public: /// Constructor CMS_2016_I1491950() : Analysis("CMS_2016_I1491950") { } /// Book histograms and initialise projections before the run void init() { FinalState fs(Cuts::pT > 0. 
&& Cuts::abseta < 6.); PromptFinalState prompt_fs(fs); prompt_fs.acceptMuonDecays(true); prompt_fs.acceptTauDecays(true); // Projection for dressed electrons and muons Cut leptonCuts = Cuts::abseta < 2.5 and Cuts::pt > 30.*GeV; SpecialDressedLeptons dressedleptons(prompt_fs, leptonCuts); declare(dressedleptons, "DressedLeptons"); // Neutrinos IdentifiedFinalState neutrinos(prompt_fs); neutrinos.acceptNeutrinos(); declare(neutrinos, "Neutrinos"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressedleptons); fsForJets.addVetoOnThisFinalState(neutrinos); declare(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::Muons::DECAY, JetAlg::Invisibles::DECAY), "Jets"); //book hists book(_hist_thadpt, "d01-x02-y01"); book(_hist_thady, "d03-x02-y01"); book(_hist_tleppt, "d05-x02-y01"); book(_hist_tlepy, "d07-x02-y01"); book(_hist_ttpt, "d09-x02-y01"); book(_hist_tty, "d13-x02-y01"); book(_hist_ttm, "d11-x02-y01"); book(_hist_njet, "d15-x02-y01"); book(_hist_njets_thadpt_1, "d17-x02-y01"); book(_hist_njets_thadpt_2, "d18-x02-y01"); book(_hist_njets_thadpt_3, "d19-x02-y01"); book(_hist_njets_thadpt_4, "d20-x02-y01"); book(_hist_njets_ttpt_1, "d22-x02-y01"); book(_hist_njets_ttpt_2, "d23-x02-y01"); book(_hist_njets_ttpt_3, "d24-x02-y01"); book(_hist_njets_ttpt_4, "d25-x02-y01"); book(_hist_thady_thadpt_1, "d27-x02-y01"); book(_hist_thady_thadpt_2, "d28-x02-y01"); book(_hist_thady_thadpt_3, "d29-x02-y01"); book(_hist_thady_thadpt_4, "d30-x02-y01"); book(_hist_ttm_tty_1, "d32-x02-y01"); book(_hist_ttm_tty_2, "d33-x02-y01"); book(_hist_ttm_tty_3, "d34-x02-y01"); book(_hist_ttm_tty_4, "d35-x02-y01"); book(_hist_ttpt_ttm_1, "d37-x02-y01"); book(_hist_ttpt_ttm_2, "d38-x02-y01"); book(_hist_ttpt_ttm_3, "d39-x02-y01"); book(_hist_ttpt_ttm_4, "d40-x02-y01"); book(_histnorm_thadpt, "d42-x02-y01"); book(_histnorm_thady, "d44-x02-y01"); book(_histnorm_tleppt, "d46-x02-y01"); book(_histnorm_tlepy, "d48-x02-y01"); book(_histnorm_ttpt, "d50-x02-y01"); 
book(_histnorm_tty, "d54-x02-y01"); book(_histnorm_ttm, "d52-x02-y01"); book(_histnorm_njet, "d56-x02-y01"); book(_histnorm_njets_thadpt_1, "d58-x02-y01"); book(_histnorm_njets_thadpt_2, "d59-x02-y01"); book(_histnorm_njets_thadpt_3, "d60-x02-y01"); book(_histnorm_njets_thadpt_4, "d61-x02-y01"); book(_histnorm_njets_ttpt_1, "d63-x02-y01"); book(_histnorm_njets_ttpt_2, "d64-x02-y01"); book(_histnorm_njets_ttpt_3, "d65-x02-y01"); book(_histnorm_njets_ttpt_4, "d66-x02-y01"); book(_histnorm_thady_thadpt_1, "d68-x02-y01"); book(_histnorm_thady_thadpt_2, "d69-x02-y01"); book(_histnorm_thady_thadpt_3, "d70-x02-y01"); book(_histnorm_thady_thadpt_4, "d71-x02-y01"); book(_histnorm_ttm_tty_1, "d73-x02-y01"); book(_histnorm_ttm_tty_2, "d74-x02-y01"); book(_histnorm_ttm_tty_3, "d75-x02-y01"); book(_histnorm_ttm_tty_4, "d76-x02-y01"); book(_histnorm_ttpt_ttm_1, "d78-x02-y01"); book(_histnorm_ttpt_ttm_2, "d79-x02-y01"); book(_histnorm_ttpt_ttm_3, "d80-x02-y01"); book(_histnorm_ttpt_ttm_4, "d81-x02-y01"); } /// Perform the per-event analysis void analyze(const Event& event) { // leptons const SpecialDressedLeptons& dressedleptons_proj = applyProjection(event, "DressedLeptons"); std::vector dressedLeptons = dressedleptons_proj.dressedLeptons(); if(dressedLeptons.size() != 1) return; // neutrinos const Particles neutrinos = apply(event, "Neutrinos").particlesByPt(); _nusum = FourMomentum(0., 0., 0., 0.); for(const Particle& neutrino : neutrinos) _nusum += neutrino.momentum(); _wl = _nusum + dressedLeptons[0].momentum(); // jets Cut jet_cut = (Cuts::abseta < 2.5) and (Cuts::pT > 25.*GeV); const Jets jets = apply(event, "Jets").jetsByPt(jet_cut); Jets allJets; for (const Jet& jet : jets) { allJets.push_back(jet); } Jets bJets; for (const Jet& jet : allJets) { if (jet.bTagged()) bJets.push_back(jet); } if(bJets.size() < 2 || allJets.size() < 4) return; //construct top quark proxies double Kmin = numeric_limits::max(); for(const Jet& itaj : allJets) { for(const Jet& itbj : allJets) { if 
(itaj.momentum() == itbj.momentum()) continue; FourMomentum wh(itaj.momentum() + itbj.momentum()); for(const Jet& ithbj : bJets) { if(itaj.momentum() == ithbj.momentum() || itbj.momentum() == ithbj.momentum()) continue; FourMomentum th(wh + ithbj.momentum()); for(const Jet& itlbj : bJets) { if(itaj.momentum() == itlbj.momentum() || itbj.momentum() == itlbj.momentum() || ithbj.momentum() == itlbj.momentum()) continue; FourMomentum tl(_wl + itlbj.momentum()); double K = pow(wh.mass() - 80.4, 2) + pow(th.mass() - 172.5, 2) + pow(tl.mass() - 172.5, 2); if(K < Kmin) { Kmin = K; _tl = tl; _th = th; _wh = wh; } } } } } _hist_thadpt->fill(_th.pt()); _hist_thady->fill(abs(_th.rapidity()) ); _hist_tleppt->fill(_tl.pt() ); _hist_tlepy->fill(abs(_tl.rapidity()) ); _histnorm_thadpt->fill(_th.pt()); _histnorm_thady->fill(abs(_th.rapidity()) ); _histnorm_tleppt->fill(_tl.pt() ); _histnorm_tlepy->fill(abs(_tl.rapidity()) ); FourMomentum tt(_tl+_th); _hist_ttpt->fill(tt.pt() ); _hist_tty->fill(abs(tt.rapidity()) ); _hist_ttm->fill(tt.mass() ); _hist_njet->fill(min(allJets.size()-4., 4.)); _histnorm_ttpt->fill(tt.pt() ); _histnorm_tty->fill(abs(tt.rapidity()) ); _histnorm_ttm->fill(tt.mass() ); _histnorm_njet->fill(min(allJets.size()-4., 4.)); if(allJets.size() == 4) { _hist_njets_thadpt_1->fill(_th.pt()); _hist_njets_ttpt_1->fill(tt.pt()); _histnorm_njets_thadpt_1->fill(_th.pt()); _histnorm_njets_ttpt_1->fill(tt.pt()); } else if(allJets.size() == 5) { _hist_njets_thadpt_2->fill(_th.pt()); _hist_njets_ttpt_2->fill(tt.pt()); _histnorm_njets_thadpt_2->fill(_th.pt()); _histnorm_njets_ttpt_2->fill(tt.pt()); } else if(allJets.size() == 6) { _hist_njets_thadpt_3->fill(_th.pt()); _hist_njets_ttpt_3->fill(tt.pt()); _histnorm_njets_thadpt_3->fill(_th.pt()); _histnorm_njets_ttpt_3->fill(tt.pt()); } else //>= 4 jets { _hist_njets_thadpt_4->fill(_th.pt()); _hist_njets_ttpt_4->fill(tt.pt()); _histnorm_njets_thadpt_4->fill(_th.pt()); _histnorm_njets_ttpt_4->fill(tt.pt()); } if(abs(_th.rapidity()) 
< 0.5) { _hist_thady_thadpt_1->fill(_th.pt()); _histnorm_thady_thadpt_1->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.0) { _hist_thady_thadpt_2->fill(_th.pt()); _histnorm_thady_thadpt_2->fill(_th.pt()); } else if(abs(_th.rapidity()) < 1.5) { _hist_thady_thadpt_3->fill(_th.pt()); _histnorm_thady_thadpt_3->fill(_th.pt()); } else if(abs(_th.rapidity()) < 2.5) { _hist_thady_thadpt_4->fill(_th.pt()); _histnorm_thady_thadpt_4->fill(_th.pt()); } if(tt.mass() >= 300. && tt.mass() < 450.) { _hist_ttm_tty_1->fill(abs(tt.rapidity())); _histnorm_ttm_tty_1->fill(abs(tt.rapidity())); } else if(tt.mass() >= 450. && tt.mass() < 625.) { _hist_ttm_tty_2->fill(abs(tt.rapidity())); _histnorm_ttm_tty_2->fill(abs(tt.rapidity())); } else if(tt.mass() >= 625. && tt.mass() < 850.) { _hist_ttm_tty_3->fill(abs(tt.rapidity())); _histnorm_ttm_tty_3->fill(abs(tt.rapidity())); } else if(tt.mass() >= 850. && tt.mass() < 2000.) { _hist_ttm_tty_4->fill(abs(tt.rapidity())); _histnorm_ttm_tty_4->fill(abs(tt.rapidity())); } if(tt.pt() < 35.) { _hist_ttpt_ttm_1->fill(tt.mass()); _histnorm_ttpt_ttm_1->fill(tt.mass()); } else if(tt.pt() < 80.) { _hist_ttpt_ttm_2->fill(tt.mass()); _histnorm_ttpt_ttm_2->fill(tt.mass()); } else if(tt.pt() < 140.) { _hist_ttpt_ttm_3->fill(tt.mass()); _histnorm_ttpt_ttm_3->fill(tt.mass()); } else if(tt.pt() < 500.) 
{ _hist_ttpt_ttm_4->fill(tt.mass()); _histnorm_ttpt_ttm_4->fill(tt.mass()); } } /// Normalise histograms etc., after the run void finalize() { scale(_hist_thadpt, crossSection()/sumOfWeights()); scale(_hist_thady, crossSection()/sumOfWeights()); scale(_hist_tleppt, crossSection()/sumOfWeights()); scale(_hist_tlepy, crossSection()/sumOfWeights()); scale(_hist_ttpt, crossSection()/sumOfWeights()); scale(_hist_tty, crossSection()/sumOfWeights()); scale(_hist_ttm, crossSection()/sumOfWeights()); scale(_hist_njet, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_thadpt_4, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_1, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_2, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_3, crossSection()/sumOfWeights()); scale(_hist_njets_ttpt_4, crossSection()/sumOfWeights()); scale(_hist_thady_thadpt_1, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_2, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_3, crossSection()/sumOfWeights()/0.5); scale(_hist_thady_thadpt_4, crossSection()/sumOfWeights()/1.0); scale(_hist_ttm_tty_1, crossSection()/sumOfWeights()/150.); scale(_hist_ttm_tty_2, crossSection()/sumOfWeights()/175.); scale(_hist_ttm_tty_3, crossSection()/sumOfWeights()/225.); scale(_hist_ttm_tty_4, crossSection()/sumOfWeights()/1150.); scale(_hist_ttpt_ttm_1, crossSection()/sumOfWeights()/35.); scale(_hist_ttpt_ttm_2, crossSection()/sumOfWeights()/45.); scale(_hist_ttpt_ttm_3, crossSection()/sumOfWeights()/60.); scale(_hist_ttpt_ttm_4, crossSection()/sumOfWeights()/360.); scale(_histnorm_thadpt, 1./_histnorm_thadpt->sumW(false)); scale(_histnorm_thady, 1./_histnorm_thady->sumW(false)); scale(_histnorm_tleppt, 1./_histnorm_tleppt->sumW(false)); scale(_histnorm_tlepy, 1./_histnorm_tlepy->sumW(false)); 
scale(_histnorm_ttpt, 1./_histnorm_ttpt->sumW(false)); scale(_histnorm_tty, 1./_histnorm_tty->sumW(false)); scale(_histnorm_ttm, 1./_histnorm_ttm->sumW(false)); scale(_histnorm_njet, 1./_histnorm_njet->sumW(false)); double sum_njets_thadpt = _histnorm_njets_thadpt_1->sumW(false) + _histnorm_njets_thadpt_2->sumW(false) + _histnorm_njets_thadpt_3->sumW(false) + _histnorm_njets_thadpt_4->sumW(false); scale(_histnorm_njets_thadpt_1, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_2, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_3, 1./sum_njets_thadpt); scale(_histnorm_njets_thadpt_4, 1./sum_njets_thadpt); double sum_njets_ttpt = _histnorm_njets_ttpt_1->sumW(false) + _histnorm_njets_ttpt_2->sumW(false) + _histnorm_njets_ttpt_3->sumW(false) + _histnorm_njets_ttpt_4->sumW(false); scale(_histnorm_njets_ttpt_1, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_2, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_3, 1./sum_njets_ttpt); scale(_histnorm_njets_ttpt_4, 1./sum_njets_ttpt); double sum_thady_thadpt = _histnorm_thady_thadpt_1->sumW(false) + _histnorm_thady_thadpt_2->sumW(false) + _histnorm_thady_thadpt_3->sumW(false) + _histnorm_thady_thadpt_4->sumW(false); scale(_histnorm_thady_thadpt_1, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_2, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_3, 1./sum_thady_thadpt/0.5); scale(_histnorm_thady_thadpt_4, 1./sum_thady_thadpt/1.0); double sum_ttm_tty = _histnorm_ttm_tty_1->sumW(false) + _histnorm_ttm_tty_2->sumW(false) + _histnorm_ttm_tty_3->sumW(false) + _histnorm_ttm_tty_4->sumW(false); scale(_histnorm_ttm_tty_1, 1./sum_ttm_tty/150.); scale(_histnorm_ttm_tty_2, 1./sum_ttm_tty/175.); scale(_histnorm_ttm_tty_3, 1./sum_ttm_tty/225.); scale(_histnorm_ttm_tty_4, 1./sum_ttm_tty/1150.); double sum_ttpt_ttm = _histnorm_ttpt_ttm_1->sumW(false) + _histnorm_ttpt_ttm_2->sumW(false) + _histnorm_ttpt_ttm_3->sumW(false) + _histnorm_ttpt_ttm_4->sumW(false); scale(_histnorm_ttpt_ttm_1, 1./sum_ttpt_ttm/35.); 
scale(_histnorm_ttpt_ttm_2, 1./sum_ttpt_ttm/45.); scale(_histnorm_ttpt_ttm_3, 1./sum_ttpt_ttm/60.); scale(_histnorm_ttpt_ttm_4, 1./sum_ttpt_ttm/360.); } /// @brief Special dressed lepton finder /// /// Find dressed leptons by clustering all leptons and photons class SpecialDressedLeptons : public FinalState { public: /// The default constructor. May specify cuts SpecialDressedLeptons(const FinalState& fs, const Cut& cut) : FinalState(cut) { setName("SpecialDressedLeptons"); IdentifiedFinalState ifs(fs); ifs.acceptIdPair(PID::PHOTON); ifs.acceptIdPair(PID::ELECTRON); ifs.acceptIdPair(PID::MUON); - addProjection(ifs, "IFS"); - addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); + declare(ifs, "IFS"); + declare(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); } /// Clone on the heap. virtual unique_ptr clone() const { return unique_ptr(new SpecialDressedLeptons(*this)); } /// Retrieve the dressed leptons const vector& dressedLeptons() const { return _clusteredLeptons; } private: /// Container which stores the clustered lepton objects vector _clusteredLeptons; public: void project(const Event& e) { _theParticles.clear(); _clusteredLeptons.clear(); vector allClusteredLeptons; const Jets jets = applyProjection(e, "LeptonJets").jetsByPt(5.*GeV); for (const Jet& jet : jets) { Particle lepCand; for (const Particle& cand : jet.particles()) { - const int absPdgId = abs(cand.pdgId()); + const int absPdgId = abs(cand.pid()); if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) { if (cand.pt() > lepCand.pt()) lepCand = cand; } } //Central lepton must be the major component - if ((lepCand.pt() < jet.pt()/2.) || (lepCand.pdgId() == 0)) continue; + if ((lepCand.pt() < jet.pt()/2.) 
|| (lepCand.pid() == 0)) continue; DressedLepton lepton(lepCand); for (const Particle& cand : jet.particles()) { if (isSame(cand, lepCand)) continue; if (cand.pid() != PID::PHOTON) continue; lepton.addConstituent(cand, true); } allClusteredLeptons.push_back(lepton); } for (const DressedLepton& lepton : allClusteredLeptons) { if (accept(lepton)) { _clusteredLeptons.push_back(lepton); _theParticles.push_back(lepton.constituentLepton()); _theParticles += lepton.constituentPhotons(); } } } }; - - /// Normalise histograms etc., after the run - void finalize() - { - scale(_hist_thadpt, crossSection()/sumOfWeights()); - scale(_hist_thady, crossSection()/sumOfWeights()); - scale(_hist_tleppt, crossSection()/sumOfWeights()); - scale(_hist_tlepy, crossSection()/sumOfWeights()); - scale(_hist_ttpt, crossSection()/sumOfWeights()); - scale(_hist_tty, crossSection()/sumOfWeights()); - scale(_hist_ttm, crossSection()/sumOfWeights()); - scale(_hist_njet, crossSection()/sumOfWeights()); - scale(_hist_njets_thadpt_1, crossSection()/sumOfWeights()); - scale(_hist_njets_thadpt_2, crossSection()/sumOfWeights()); - scale(_hist_njets_thadpt_3, crossSection()/sumOfWeights()); - scale(_hist_njets_thadpt_4, crossSection()/sumOfWeights()); - scale(_hist_njets_ttpt_1, crossSection()/sumOfWeights()); - scale(_hist_njets_ttpt_2, crossSection()/sumOfWeights()); - scale(_hist_njets_ttpt_3, crossSection()/sumOfWeights()); - scale(_hist_njets_ttpt_4, crossSection()/sumOfWeights()); - scale(_hist_thady_thadpt_1, crossSection()/sumOfWeights()/0.5); - scale(_hist_thady_thadpt_2, crossSection()/sumOfWeights()/0.5); - scale(_hist_thady_thadpt_3, crossSection()/sumOfWeights()/0.5); - scale(_hist_thady_thadpt_4, crossSection()/sumOfWeights()/1.0); - scale(_hist_ttm_tty_1, crossSection()/sumOfWeights()/150.); - scale(_hist_ttm_tty_2, crossSection()/sumOfWeights()/175.); - scale(_hist_ttm_tty_3, crossSection()/sumOfWeights()/225.); - scale(_hist_ttm_tty_4, crossSection()/sumOfWeights()/1150.); - 
scale(_hist_ttpt_ttm_1, crossSection()/sumOfWeights()/35.); - scale(_hist_ttpt_ttm_2, crossSection()/sumOfWeights()/45.); - scale(_hist_ttpt_ttm_3, crossSection()/sumOfWeights()/60.); - scale(_hist_ttpt_ttm_4, crossSection()/sumOfWeights()/360.); - - scale(_histnorm_thadpt, 1./_histnorm_thadpt->sumW(false)); - scale(_histnorm_thady, 1./_histnorm_thady->sumW(false)); - scale(_histnorm_tleppt, 1./_histnorm_tleppt->sumW(false)); - scale(_histnorm_tlepy, 1./_histnorm_tlepy->sumW(false)); - scale(_histnorm_ttpt, 1./_histnorm_ttpt->sumW(false)); - scale(_histnorm_tty, 1./_histnorm_tty->sumW(false)); - scale(_histnorm_ttm, 1./_histnorm_ttm->sumW(false)); - scale(_histnorm_njet, 1./_histnorm_njet->sumW(false)); - double sum_njets_thadpt = _histnorm_njets_thadpt_1->sumW(false) + _histnorm_njets_thadpt_2->sumW(false) + _histnorm_njets_thadpt_3->sumW(false) + _histnorm_njets_thadpt_4->sumW(false); - scale(_histnorm_njets_thadpt_1, 1./sum_njets_thadpt); - scale(_histnorm_njets_thadpt_2, 1./sum_njets_thadpt); - scale(_histnorm_njets_thadpt_3, 1./sum_njets_thadpt); - scale(_histnorm_njets_thadpt_4, 1./sum_njets_thadpt); - double sum_njets_ttpt = _histnorm_njets_ttpt_1->sumW(false) + _histnorm_njets_ttpt_2->sumW(false) + _histnorm_njets_ttpt_3->sumW(false) + _histnorm_njets_ttpt_4->sumW(false); - scale(_histnorm_njets_ttpt_1, 1./sum_njets_ttpt); - scale(_histnorm_njets_ttpt_2, 1./sum_njets_ttpt); - scale(_histnorm_njets_ttpt_3, 1./sum_njets_ttpt); - scale(_histnorm_njets_ttpt_4, 1./sum_njets_ttpt); - double sum_thady_thadpt = _histnorm_thady_thadpt_1->sumW(false) + _histnorm_thady_thadpt_2->sumW(false) + _histnorm_thady_thadpt_3->sumW(false) + _histnorm_thady_thadpt_4->sumW(false); - scale(_histnorm_thady_thadpt_1, 1./sum_thady_thadpt/0.5); - scale(_histnorm_thady_thadpt_2, 1./sum_thady_thadpt/0.5); - scale(_histnorm_thady_thadpt_3, 1./sum_thady_thadpt/0.5); - scale(_histnorm_thady_thadpt_4, 1./sum_thady_thadpt/1.0); - double sum_ttm_tty = _histnorm_ttm_tty_1->sumW(false) + 
_histnorm_ttm_tty_2->sumW(false) + _histnorm_ttm_tty_3->sumW(false) + _histnorm_ttm_tty_4->sumW(false); - scale(_histnorm_ttm_tty_1, 1./sum_ttm_tty/150.); - scale(_histnorm_ttm_tty_2, 1./sum_ttm_tty/175.); - scale(_histnorm_ttm_tty_3, 1./sum_ttm_tty/225.); - scale(_histnorm_ttm_tty_4, 1./sum_ttm_tty/1150.); - double sum_ttpt_ttm = _histnorm_ttpt_ttm_1->sumW(false) + _histnorm_ttpt_ttm_2->sumW(false) + _histnorm_ttpt_ttm_3->sumW(false) + _histnorm_ttpt_ttm_4->sumW(false); - scale(_histnorm_ttpt_ttm_1, 1./sum_ttpt_ttm/35.); - scale(_histnorm_ttpt_ttm_2, 1./sum_ttpt_ttm/45.); - scale(_histnorm_ttpt_ttm_3, 1./sum_ttpt_ttm/60.); - scale(_histnorm_ttpt_ttm_4, 1./sum_ttpt_ttm/360.); - - } - - private: FourMomentum _tl; FourMomentum _th; FourMomentum _wl; FourMomentum _wh; FourMomentum _nusum; Histo1DPtr _hist_thadpt; Histo1DPtr _hist_thady; Histo1DPtr _hist_tleppt; Histo1DPtr _hist_tlepy; Histo1DPtr _hist_ttpt; Histo1DPtr _hist_tty; Histo1DPtr _hist_ttm; Histo1DPtr _hist_njet; Histo1DPtr _hist_njets_thadpt_1; Histo1DPtr _hist_njets_thadpt_2; Histo1DPtr _hist_njets_thadpt_3; Histo1DPtr _hist_njets_thadpt_4; Histo1DPtr _hist_njets_ttpt_1; Histo1DPtr _hist_njets_ttpt_2; Histo1DPtr _hist_njets_ttpt_3; Histo1DPtr _hist_njets_ttpt_4; Histo1DPtr _hist_thady_thadpt_1; Histo1DPtr _hist_thady_thadpt_2; Histo1DPtr _hist_thady_thadpt_3; Histo1DPtr _hist_thady_thadpt_4; Histo1DPtr _hist_ttm_tty_1; Histo1DPtr _hist_ttm_tty_2; Histo1DPtr _hist_ttm_tty_3; Histo1DPtr _hist_ttm_tty_4; Histo1DPtr _hist_ttpt_ttm_1; Histo1DPtr _hist_ttpt_ttm_2; Histo1DPtr _hist_ttpt_ttm_3; Histo1DPtr _hist_ttpt_ttm_4; Histo1DPtr _histnorm_thadpt; Histo1DPtr _histnorm_thady; Histo1DPtr _histnorm_tleppt; Histo1DPtr _histnorm_tlepy; Histo1DPtr _histnorm_ttpt; Histo1DPtr _histnorm_tty; Histo1DPtr _histnorm_ttm; Histo1DPtr _histnorm_njet; Histo1DPtr _histnorm_njets_thadpt_1; Histo1DPtr _histnorm_njets_thadpt_2; Histo1DPtr _histnorm_njets_thadpt_3; Histo1DPtr _histnorm_njets_thadpt_4; Histo1DPtr 
_histnorm_njets_ttpt_1; Histo1DPtr _histnorm_njets_ttpt_2; Histo1DPtr _histnorm_njets_ttpt_3; Histo1DPtr _histnorm_njets_ttpt_4; Histo1DPtr _histnorm_thady_thadpt_1; Histo1DPtr _histnorm_thady_thadpt_2; Histo1DPtr _histnorm_thady_thadpt_3; Histo1DPtr _histnorm_thady_thadpt_4; Histo1DPtr _histnorm_ttm_tty_1; Histo1DPtr _histnorm_ttm_tty_2; Histo1DPtr _histnorm_ttm_tty_3; Histo1DPtr _histnorm_ttm_tty_4; Histo1DPtr _histnorm_ttpt_ttm_1; Histo1DPtr _histnorm_ttpt_ttm_2; Histo1DPtr _histnorm_ttpt_ttm_3; Histo1DPtr _histnorm_ttpt_ttm_4; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_I1491950); } diff --git a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc --- a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc +++ b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc @@ -1,176 +1,176 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" namespace Rivet { /// Jet multiplicity in lepton+jets ttbar at 8 TeV class CMS_2016_PAS_TOP_15_006 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_PAS_TOP_15_006); /// @name Analysis methods //@{ /// Set up projections and book histograms void init() { // Complete final state FinalState fs; Cut superLooseLeptonCuts = Cuts::pt > 5*GeV; SpecialDressedLeptons dressedleptons(fs, superLooseLeptonCuts); declare(dressedleptons, "DressedLeptons"); // Projection for jets VetoedFinalState fsForJets(fs); fsForJets.addVetoOnThisFinalState(dressedleptons); declare(FastJets(fsForJets, FastJets::ANTIKT, 0.5), "Jets"); // Booking of histograms book(_normedElectronMuonHisto, "normedElectronMuonHisto", 7, 3.5, 10.5, "Normalized differential cross section in lepton+jets channel", "Jet multiplicity", "Normed units"); book(_absXSElectronMuonHisto , 
"absXSElectronMuonHisto", 7, 3.5, 10.5, "Differential cross section in lepton+jets channel", "Jet multiplicity", "pb"); } /// Per-event analysis void analyze(const Event& event) { // Select ttbar -> lepton+jets const SpecialDressedLeptons& dressedleptons = applyProjection(event, "DressedLeptons"); vector selleptons; for (const DressedLepton& dressedlepton : dressedleptons.dressedLeptons()) { // Select good leptons if (dressedlepton.pT() > 30*GeV && dressedlepton.abseta() < 2.4) selleptons += dressedlepton.mom(); // Veto loose leptons else if (dressedlepton.pT() > 15*GeV && dressedlepton.abseta() < 2.5) vetoEvent; } if (selleptons.size() != 1) vetoEvent; // Identify hardest tight lepton const FourMomentum lepton = selleptons[0]; // Jets const FastJets& jets = applyProjection(event, "Jets"); const Jets jets30 = jets.jetsByPt(30*GeV); int nJets = 0, nBJets = 0; for (const Jet& jet : jets30) { if (jet.abseta() > 2.5) continue; if (deltaR(jet.momentum(), lepton) < 0.5) continue; nJets += 1; if (jet.bTagged(Cuts::pT > 5*GeV)) nBJets += 1; } // Require >= 4 resolved jets, of which two must be b-tagged if (nJets < 4 || nBJets < 2) vetoEvent; // Fill histograms _normedElectronMuonHisto->fill(min(nJets, 10)); _absXSElectronMuonHisto ->fill(min(nJets, 10)); } void finalize() { const double ttbarXS = !std::isnan(crossSectionPerEvent()) ? 
crossSection() : 252.89*picobarn; if (std::isnan(crossSectionPerEvent())) MSG_INFO("No valid cross-section given, using NNLO (arXiv:1303.6254; sqrt(s)=8 TeV, m_t=172.5 GeV): " << ttbarXS/picobarn << " pb"); const double xsPerWeight = ttbarXS/picobarn / sumOfWeights(); scale(_absXSElectronMuonHisto, xsPerWeight); normalize(_normedElectronMuonHisto); } //@} /// @brief Special dressed lepton finder /// /// Find dressed leptons by clustering all leptons and photons class SpecialDressedLeptons : public FinalState { public: /// Constructor SpecialDressedLeptons(const FinalState& fs, const Cut& cut) : FinalState(cut) { setName("SpecialDressedLeptons"); IdentifiedFinalState ifs(fs); ifs.acceptIdPair(PID::PHOTON); ifs.acceptIdPair(PID::ELECTRON); ifs.acceptIdPair(PID::MUON); - addProjection(ifs, "IFS"); - addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); + declare(ifs, "IFS"); + declare(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets"); } /// Clone on the heap virtual unique_ptr clone() const { return unique_ptr(new SpecialDressedLeptons(*this)); } /// Retrieve the dressed leptons const vector& dressedLeptons() const { return _clusteredLeptons; } /// Perform the calculation void project(const Event& e) { _theParticles.clear(); _clusteredLeptons.clear(); vector allClusteredLeptons; const Jets jets = applyProjection(e, "LeptonJets").jetsByPt(5*GeV); for (const Jet& jet : jets) { Particle lepCand; for (const Particle& cand : jet.particles()) { const int absPdgId = cand.abspid(); if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) { if (cand.pt() > lepCand.pt()) lepCand = cand; } } // Central lepton must be the major component - if ((lepCand.pt() < jet.pt()/2.) || (lepCand.pdgId() == 0)) continue; + if ((lepCand.pt() < jet.pt()/2.) 
|| (lepCand.pid() == 0)) continue; DressedLepton lepton = DressedLepton(lepCand); for (const Particle& cand : jet.particles()) { if (isSame(cand, lepCand)) continue; lepton.addConstituent(cand, true); } allClusteredLeptons.push_back(lepton); } for (const DressedLepton& lepton : allClusteredLeptons) { if (accept(lepton)) { _clusteredLeptons.push_back(lepton); _theParticles.push_back(lepton.constituentLepton()); _theParticles += lepton.constituentPhotons(); } } } private: /// Container which stores the clustered lepton objects vector _clusteredLeptons; }; private: /// Histograms Histo1DPtr _normedElectronMuonHisto, _absXSElectronMuonHisto; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CMS_2016_PAS_TOP_15_006); } diff --git a/analyses/pluginCMS/CMS_2018_I1686000.cc b/analyses/pluginCMS/CMS_2018_I1686000.cc --- a/analyses/pluginCMS/CMS_2018_I1686000.cc +++ b/analyses/pluginCMS/CMS_2018_I1686000.cc @@ -1,107 +1,107 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/DressedLeptons.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/MergedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/MissingMomentum.hh" #include "Rivet/Projections/PartonicTops.hh" #include "Rivet/Projections/PromptFinalState.hh" namespace Rivet { /// Fiducial single-top + photon cross-section measurement at 13 TeV class CMS_2018_I1686000 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2018_I1686000); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Leptons declare(DressedLeptons(PromptFinalState(), 0.1, Cuts::abseta < 2.4 && Cuts::pT > 26*GeV), "Leptons"); // Jets declare(FastJets(FinalState(Cuts::abseta < 5), FastJets::ANTIKT, 0.4), "Jets"); // Photons declare(PromptFinalState(Cuts::pid == PID::PHOTON && Cuts::pT > 25*GeV && Cuts::abseta < 1.44), "Photons"); // MET 
declare(MissingMomentum(FinalState(Cuts::abseta < 5)), "MET"); // Book xsec counter - _c_xsec_fid = bookCounter("xsec"); + book(_c_xsec_fid, "xsec"); } /// Perform the per-event analysis void analyze(const Event& event) { // // Find at least 2 jets, one b-tagged // const Jets jets = apply(event, "Jets").jetsByPt(Cuts::abseta < 4.7 && Cuts::pT > 40*GeV); // Jets bjets, ljets; // for (const Jet& j : jets) // ((j.abseta() < 2.5 && j.bTagged()) ? bjets : ljets) += j; // if (bjets.empty() || ljets.empty()) vetoEvent; // const Jet& bjet = bjets[0]; // const Jet& ljet = ljets[0]; // // Require exactly one isolated lepton, and it has to be a muon // const Particles& leps = apply(event, "Leptons").particlesByPt(); // const Particles isoleps = discardIfAnyDeltaRLess(leps, jets, 0.3); // if (isoleps.size() != 1 || isoleps[0].abspid() != PID::MUON) vetoEvent; // const Particle& muon = isoleps[0]; // // Require exactly one isolated photon // const Particles& photons = apply(event, "Photons").particlesByPt(); // const Particles muisophotons = filter_discard(photons, deltaRLess(muon,0.5)); // const Particles isophotons = discardIfAnyDeltaRLess(muisophotons, Jets{bjet,ljet}, 0.5); // if (isophotons.size() != 1) vetoEvent; // // Require 30 GeV of missing ET // const double met = apply(event, "MET").met(); // if (met < 30*GeV) vetoEvent; // Find light jets const Jets jets = apply(event, "Jets").jetsByPt(); const Jets ljets = filter_discard(jets, [](const Jet& j){ return j.abseta() < 2.5 && j.bTagged(); } ); // Require a photon, isolated from the light jet Particles photons = apply(event, "Photons").particlesByPt(); if (!ljets.empty()) ifilter_discard(photons, deltaRLess(ljets[0], 0.5)); if (photons.empty()) vetoEvent; // Fill counter - _c_xsec_fid->fill(event.weight()); + _c_xsec_fid->fill(); } /// Normalise histograms etc., after the run void finalize() { const double BRmu = 0.13 + 0.13*0.17; //< decay BR direct to a muon, and to a muon via a tau scale(_c_xsec_fid, 
BRmu*crossSection()/femtobarn/sumOfWeights()); } //@} /// Counter CounterPtr _c_xsec_fid; }; DECLARE_RIVET_PLUGIN(CMS_2018_I1686000); } diff --git a/analyses/pluginDORIS/ARGUS_1992_I319102.cc b/analyses/pluginDORIS/ARGUS_1992_I319102.cc --- a/analyses/pluginDORIS/ARGUS_1992_I319102.cc +++ b/analyses/pluginDORIS/ARGUS_1992_I319102.cc @@ -1,114 +1,117 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class ARGUS_1992_I319102 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ARGUS_1992_I319102); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms if(fuzzyEquals(sqrtS()/GeV, 10.47 , 1E-3)) - _h_N = bookHisto1D(2, 1, 1); + book(_h_N, 2, 1, 1); else if(fuzzyEquals(sqrtS()/GeV, 10.575, 1E-3)) - _h_N = bookHisto1D(3, 1, 1); + book(_h_N, 3, 1, 1); // counters for R - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); unsigned int nCharged(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; - if(PID::isCharged(p.pdgId())) ++nCharged; + if(PID::isCharged(p.pid())) ++nCharged; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else { - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); if(_h_N) - _h_N->fill(nCharged,event.weight()); + _h_N->fill(nCharged); } } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = 
*_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } if(_h_N) { normalize(_h_N,200.); } } //@} /// @name Histograms //@{ Histo1DPtr _h_N; CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ARGUS_1992_I319102); } diff --git a/analyses/pluginDORIS/ARGUS_1993_S2669951.cc b/analyses/pluginDORIS/ARGUS_1993_S2669951.cc --- a/analyses/pluginDORIS/ARGUS_1993_S2669951.cc +++ b/analyses/pluginDORIS/ARGUS_1993_S2669951.cc @@ -1,203 +1,198 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Production of the $\eta'(958)$ and $f_0(980)$ in $e^+e^-$ annihilation in the Upsilon region /// @author Peter Richardson class ARGUS_1993_S2669951 : public Analysis { public: ARGUS_1993_S2669951() - : 
Analysis("ARGUS_1993_S2669951"), - _count_etaPrime_highZ(2, 0.), - _count_etaPrime_allZ(3, 0.), - _count_f0(3, 0.), - _weightSum_cont(0.), - _weightSum_Ups1(0.), - _weightSum_Ups2(0.) - { } + : Analysis("ARGUS_1993_S2669951") { } void init() { declare(UnstableParticles(), "UFS"); book(_weightSum_cont, "weightSum_cont"); book(_weightSum_Ups1, "weightSum_Ups1"); book(_weightSum_Ups2, "weightSum_Ups2"); for ( auto i : {0,1,2} ) { if ( i < 2 ) book(_count_etaPrime_highZ[i], "count_etaPrime_highz_" + to_str(i)); book(_count_etaPrime_allZ[i], "count_etaPrime_allz_" + to_str(i)); book(_count_f0[i], "count_f0_" + to_str(i)); } book(_hist_cont_f0 ,2, 1, 1); book(_hist_Ups1_f0 ,3, 1, 1); book(_hist_Ups2_f0 ,4, 1, 1); - book(s111, 1, 1, 1, true); - book(s112, 1, 1, 2, true); - book(s511, 5, 1, 1, true); - } void analyze(const Event& e) { // Find the Upsilons among the unstables const UnstableParticles& ufs = apply(e, "UFS"); Particles upsilons; // First in unstable final state for (const Particle& p : ufs.particles()) if (p.pid() == 553 || p.pid() == 100553) upsilons.push_back(p); // Then in whole event if fails if (upsilons.empty()) { /// @todo Replace HepMC digging with Particle::descendents etc. 
calls for(ConstGenParticlePtr p: HepMCUtils::particles(e.genEvent())) { if ( p->pdg_id() != 553 && p->pdg_id() != 100553 ) continue; // Discard it if its parent has the same PDG ID code (avoid duplicates) ConstGenVertexPtr pv = p->production_vertex(); bool passed = true; if (pv) { for(ConstGenParticlePtr pp: HepMCUtils::particles(pv, Relatives::PARENTS)){ if ( p->pdg_id() == pp->pdg_id() ) { passed = false; break; } } } if (passed) upsilons.push_back(Particle(*p)); } } // Finding done, now fill counters if (upsilons.empty()) { // Continuum MSG_DEBUG("No Upsilons found => continuum event"); _weightSum_cont->fill(); unsigned int nEtaA(0), nEtaB(0), nf0(0); for (const Particle& p : ufs.particles()) { const int id = p.abspid(); const double xp = 2.*p.E()/sqrtS(); const double beta = p.p3().mod() / p.E(); if (id == 9010221) { _hist_cont_f0->fill(xp, 1.0/beta); nf0 += 1; } else if (id == 331) { if (xp > 0.35) nEtaA += 1; nEtaB += 1; } } _count_f0[2] ->fill(nf0); _count_etaPrime_highZ[1]->fill(nEtaA); _count_etaPrime_allZ[2] ->fill(nEtaB); } else { // Upsilon(s) found MSG_DEBUG("Upsilons found => resonance event"); for (const Particle& ups : upsilons) { const int parentId = ups.pid(); ((parentId == 553) ? _weightSum_Ups1 : _weightSum_Ups2)->fill(); Particles unstable; // Find the decay products we want findDecayProducts(ups.genParticle(), unstable); LorentzTransform cms_boost; if (ups.p3().mod() > 1*MeV) cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec()); const double mass = ups.mass(); unsigned int nEtaA(0), nEtaB(0), nf0(0); for(const Particle& p : unstable) { const int id = p.abspid(); const FourMomentum p2 = cms_boost.transform(p.momentum()); const double xp = 2.*p2.E()/mass; const double beta = p2.p3().mod()/p2.E(); if (id == 9010221) { //< ? ((parentId == 553) ? _hist_Ups1_f0 : _hist_Ups2_f0)->fill(xp, 1.0/beta); nf0 += 1; } else if (id == 331) { //< ? 
if (xp > 0.35) nEtaA += 1; nEtaB += 1; } } if (parentId == 553) { _count_f0[0] ->fill( nf0); _count_etaPrime_highZ[0]->fill(nEtaA); _count_etaPrime_allZ[0] ->fill(nEtaB); } else { _count_f0[1]->fill(nf0); _count_etaPrime_allZ[1] ->fill(nEtaB); } } } } void finalize() { // High-Z eta' multiplicity + Scatter2DPtr s111; + book(s111, 1, 1, 1, true); if (_weightSum_Ups1->val() > 0) // Point at 9.460 s111->point(0).setY(_count_etaPrime_highZ[0]->val() / _weightSum_Ups1->val(), 0); if (_weightSum_cont->val() > 0) // Point at 9.905 s111->point(1).setY(_count_etaPrime_highZ[1]->val() / _weightSum_cont->val(), 0); // All-Z eta' multiplicity + Scatter2DPtr s112; + book(s112, 1, 1, 2, true); if (_weightSum_Ups1->val() > 0) // Point at 9.460 s112->point(0).setY(_count_etaPrime_allZ[0]->val() / _weightSum_Ups1->val(), 0); if (_weightSum_cont->val() > 0) // Point at 9.905 s112->point(1).setY(_count_etaPrime_allZ[2]->val() / _weightSum_cont->val(), 0); if (_weightSum_Ups2->val() > 0) // Point at 10.02 s112->point(2).setY(_count_etaPrime_allZ[1]->val() / _weightSum_Ups2->val(), 0); // f0 multiplicity + Scatter2DPtr s511; + book(s511, 5, 1, 1, true); if (_weightSum_Ups1->val() > 0) // Point at 9.46 s511->point(0).setY(_count_f0[0]->val() / _weightSum_Ups1->val(), 0); if (_weightSum_Ups2->val() > 0) // Point at 10.02 s511->point(1).setY(_count_f0[1]->val() / _weightSum_Ups2->val(), 0); if (_weightSum_cont->val() > 0) // Point at 10.45 s511->point(2).setY(_count_f0[2]->val() / _weightSum_cont->val(), 0); // Scale histos if (_weightSum_cont->val() > 0.) scale(_hist_cont_f0, 1./ *_weightSum_cont); if (_weightSum_Ups1->val() > 0.) scale(_hist_Ups1_f0, 1./ *_weightSum_Ups1); if (_weightSum_Ups2->val() > 0.) 
scale(_hist_Ups2_f0, 1./ *_weightSum_Ups2); } private: /// @name Counters //@{ array _count_etaPrime_highZ, _count_etaPrime_allZ, _count_f0; CounterPtr _weightSum_cont,_weightSum_Ups1,_weightSum_Ups2; //@} /// Histos Histo1DPtr _hist_cont_f0, _hist_Ups1_f0, _hist_Ups2_f0; /// Recursively walk the HepMC tree to find decay products of @a p void findDecayProducts(ConstGenParticlePtr p, Particles& unstable) { ConstGenVertexPtr dv = p->end_vertex(); for (ConstGenParticlePtr pp: HepMCUtils::particles(dv, Relatives::CHILDREN)){ const int id = abs(pp->pdg_id()); if (id == 331 || id == 9010221) unstable.push_back(Particle(pp)); else if (pp->end_vertex()) findDecayProducts(pp, unstable); } } }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ARGUS_1993_S2669951); } diff --git a/analyses/pluginDORIS/CRYSTAL_BALL_1988_I261078.cc b/analyses/pluginDORIS/CRYSTAL_BALL_1988_I261078.cc --- a/analyses/pluginDORIS/CRYSTAL_BALL_1988_I261078.cc +++ b/analyses/pluginDORIS/CRYSTAL_BALL_1988_I261078.cc @@ -1,98 +1,101 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class CRYSTAL_BALL_1988_I261078 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(CRYSTAL_BALL_1988_I261078); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons 
if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(CRYSTAL_BALL_1988_I261078); } diff --git a/analyses/pluginDORIS/DASP_1978_I129715.cc b/analyses/pluginDORIS/DASP_1978_I129715.cc --- a/analyses/pluginDORIS/DASP_1978_I129715.cc +++ b/analyses/pluginDORIS/DASP_1978_I129715.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class DASP_1978_I129715 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DASP_1978_I129715); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ 
*_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DASP_1978_I129715); } diff --git a/analyses/pluginDORIS/DASP_1982_I178613.cc b/analyses/pluginDORIS/DASP_1982_I178613.cc --- a/analyses/pluginDORIS/DASP_1982_I178613.cc +++ b/analyses/pluginDORIS/DASP_1982_I178613.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class DASP_1982_I178613 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DASP_1982_I178613); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections 
declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(2, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(2, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 2, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DASP_1982_I178613); } diff --git a/analyses/pluginDORIS/DESY147_1978_I131524.cc b/analyses/pluginDORIS/DESY147_1978_I131524.cc --- a/analyses/pluginDORIS/DESY147_1978_I131524.cc +++ b/analyses/pluginDORIS/DESY147_1978_I131524.cc @@ -1,103 +1,106 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class DESY147_1978_I131524 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DESY147_1978_I131524); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = 
*_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; for(unsigned int ix=1;ix<3;++ix) { Scatter2D temphisto(refData(ix, 1, 1)); - ostringstream title; + std::ostringstream title; title << "d0" << ix << "_sigma"; - Scatter2DPtr hadrons = bookScatter2D(title.str() + "_hadrons"); - Scatter2DPtr muons = bookScatter2D(title.str() + "_muons" ); - Scatter2DPtr mult = bookScatter2D(ix, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, title.str() + "_hadrons"); + Scatter2DPtr muons; + book(muons, title.str() + "_muons" ); + Scatter2DPtr mult; + book(mult, ix, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DESY147_1978_I131524); } diff --git a/analyses/pluginDORIS/DESY147_1980_I153896.cc b/analyses/pluginDORIS/DESY147_1980_I153896.cc --- a/analyses/pluginDORIS/DESY147_1980_I153896.cc +++ b/analyses/pluginDORIS/DESY147_1980_I153896.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class DESY147_1980_I153896 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DESY147_1980_I153896); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = 
*_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DESY147_1980_I153896); } diff --git a/analyses/pluginDORIS/LENA_1982_I179431.cc b/analyses/pluginDORIS/LENA_1982_I179431.cc --- a/analyses/pluginDORIS/LENA_1982_I179431.cc +++ b/analyses/pluginDORIS/LENA_1982_I179431.cc @@ -1,103 +1,106 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class LENA_1982_I179431 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(LENA_1982_I179431); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and 
register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; for(unsigned int ix=1;ix<4;++ix) { Scatter2D temphisto(refData(ix, 1, 1)); - ostringstream title; + std::ostringstream title; title << "d0" << ix << "_sigma"; - Scatter2DPtr hadrons = bookScatter2D(title.str() + "_hadrons"); - Scatter2DPtr muons = bookScatter2D(title.str() + "_muons" ); - Scatter2DPtr mult = bookScatter2D(ix, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, title.str() + "_hadrons"); + Scatter2DPtr muons; + book(muons, title.str() + "_muons" ); + Scatter2DPtr mult; + book(mult, ix, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LENA_1982_I179431); } diff --git a/analyses/pluginFrascati/BBAR_1980_I152630.cc b/analyses/pluginFrascati/BBAR_1980_I152630.cc --- a/analyses/pluginFrascati/BBAR_1980_I152630.cc +++ b/analyses/pluginFrascati/BBAR_1980_I152630.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class BBAR_1980_I152630 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BBAR_1980_I152630); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = 
*_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, "sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BBAR_1980_I152630); } diff --git a/analyses/pluginFrascati/FENICE_1994_I377833.cc b/analyses/pluginFrascati/FENICE_1994_I377833.cc --- a/analyses/pluginFrascati/FENICE_1994_I377833.cc +++ b/analyses/pluginFrascati/FENICE_1994_I377833.cc @@ -1,82 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class FENICE_1994_I377833 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(FENICE_1994_I377833); /// @name Analysis methods //@{ /// Book histograms and 
initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); - _nProton= bookCounter( "/TMP/nProton" ); + book(_nProton, "/TMP/nProton" ); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); // total hadronic and muonic cross sections map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } if(ntotal==2 && nCount[2212]==1 && nCount[-2212]==1) - _nProton->fill(event.weight()); + _nProton->fill(); } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /nanobarn; double sigma = _nProton->val()*fact; double error = _nProton->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqr(sqrtS()/GeV), x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _nProton; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(FENICE_1994_I377833); } diff --git a/analyses/pluginFrascati/FENICE_1998_I471263.cc b/analyses/pluginFrascati/FENICE_1998_I471263.cc --- a/analyses/pluginFrascati/FENICE_1998_I471263.cc +++ b/analyses/pluginFrascati/FENICE_1998_I471263.cc @@ -1,82 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class FENICE_1998_I471263 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(FENICE_1998_I471263); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); declare(UnstableParticles(), "UFS"); - _nNeutron= bookCounter( "/TMP/nNeutron" ); + book(_nNeutron, "/TMP/nNeutron" ); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); // total hadronic and muonic cross sections map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } if(ntotal==2 && nCount[2112]==1 && nCount[-2112]==1) - _nNeutron->fill(event.weight()); + _nNeutron->fill(); } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /nanobarn; double sigma = _nNeutron->val()*fact; double error = _nNeutron->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < 
temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _nNeutron; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(FENICE_1998_I471263); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1973_I84794.cc b/analyses/pluginFrascati/GAMMAGAMMA_1973_I84794.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1973_I84794.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1973_I84794.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1973_I84794 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1973_I84794); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = 
*_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(3, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(3,1,1); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(4,1,1); + Scatter2DPtr hadrons; + book(hadrons, 3,1,1); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 4,1,1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1973_I84794); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1975_I100016.cc b/analyses/pluginFrascati/GAMMAGAMMA_1975_I100016.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1975_I100016.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1975_I100016.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1975_I100016 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1975_I100016); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { 
// Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1,1,1); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 2); + Scatter2DPtr hadrons; + book(hadrons, 1,1,1); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 2); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1975_I100016); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1979_I133588.cc b/analyses/pluginFrascati/GAMMAGAMMA_1979_I133588.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1979_I133588.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1979_I133588.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1979_I133588 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1979_I133588); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run 
void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1,1,2); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, 1,1,2); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1979_I133588); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1979_I141722.cc b/analyses/pluginFrascati/GAMMAGAMMA_1979_I141722.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1979_I141722.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1979_I141722.cc @@ -1,140 +1,144 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1979_I141722 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1979_I141722); /// @name Analysis methods //@{ /// Book histograms and initialise 
projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); - _c_charged = bookCounter("/TMP/Ncharged"); - _c_neutral = bookCounter("/TMP/Nneutral"); - _nHadrons = 0.; + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); + book(_c_charged, "/TMP/Ncharged"); + book(_c_neutral, "/TMP/Nneutral"); + book(_nHadrons, "/TMP/NHadrons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0),ncharged(0),nneutral(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; - if(PID::isCharged(p.pdgId())) + if(PID::isCharged(p.pid())) ncharged += 1; else nneutral += 1; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else { if(ntotal==2) vetoEvent; - _c_hadrons->fill(event.weight()); - _c_charged->fill(ncharged*event.weight()); - _c_neutral->fill(nneutral*event.weight()); - _nHadrons += event.weight(); + _c_hadrons->fill(); + _c_charged->fill(ncharged); + _c_neutral->fill(nneutral); + _nHadrons->fill(); } } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /picobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D("sigma_hadrons"); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, 
"sigma_hadrons"); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } - scale(_c_charged, 1./_nHadrons); - scale(_c_neutral, 1./_nHadrons); + scale(_c_charged, 1./_nHadrons->sumW()); + scale(_c_neutral, 1./_nHadrons->sumW()); for(unsigned int iy=1; iy<3;++iy) { double aver(0.),error(0.); if(iy==1) { aver = _c_charged->val(); error = _c_charged->err(); } else { aver = _c_neutral->val(); error = _c_neutral->err(); } Scatter2D temphisto(refData(2, 1, iy)); - Scatter2DPtr mult = bookScatter2D(2, 1, iy); + Scatter2DPtr mult; + book(mult, 2, 1, iy); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, aver, ex, make_pair(error,error)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons,_c_neutral,_c_charged; - double _nHadrons; + CounterPtr _nHadrons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1979_I141722); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1980_I153382.cc b/analyses/pluginFrascati/GAMMAGAMMA_1980_I153382.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1980_I153382.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1980_I153382.cc @@ -1,78 +1,79 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1980_I153382 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1980_I153382); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _npion = bookCounter("TMP/pion"); + book(_npion, "TMP/pion"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); if(fs.particles().size()!=4) vetoEvent; - foreach (const Particle& p, fs.particles()) { - if(abs(p.pdgId())!=PID::PIPLUS) vetoEvent; + for (const Particle& p : fs.particles()) { + if(abs(p.pid())!=PID::PIPLUS) vetoEvent; } - _npion->fill(event.weight()); + _npion->fill(); } /// Normalise histograms etc., after the run void finalize() { double sigma = _npion->val(); double error = _npion->err(); sigma *= crossSection()/ sumOfWeights() /nanobarn; error *= crossSection()/ sumOfWeights() /nanobarn; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr mult = bookScatter2D(1, 1, 1); + Scatter2DPtr mult; + book(mult, 1, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { 
const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _npion; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1980_I153382); } diff --git a/analyses/pluginFrascati/GAMMAGAMMA_1981_I158474.cc b/analyses/pluginFrascati/GAMMAGAMMA_1981_I158474.cc --- a/analyses/pluginFrascati/GAMMAGAMMA_1981_I158474.cc +++ b/analyses/pluginFrascati/GAMMAGAMMA_1981_I158474.cc @@ -1,177 +1,180 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class GAMMAGAMMA_1981_I158474 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(GAMMAGAMMA_1981_I158474); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); - _n3pi = bookCounter("TMP/n3pi"); - _n4pi = bookCounter("TMP/n4pi"); - _n5pi = bookCounter("TMP/n5pi"); - _n6pi = bookCounter("TMP/n6pi"); - _n35pi = bookCounter("TMP/n35pi"); - _n46pi = bookCounter("TMP/n46pi"); - _nC2 = bookCounter("TMP/nC2"); - _nC4 = bookCounter("TMP/nC4"); - _nmu = bookCounter("TMP/nmu"); + book(_n3pi, "TMP/n3pi"); + book(_n4pi, "TMP/n4pi"); + book(_n5pi, "TMP/n5pi"); + book(_n6pi, "TMP/n6pi"); + book(_n35pi, "TMP/n35pi"); + book(_n46pi, "TMP/n46pi"); + book(_nC2, "TMP/nC2"); + book(_nC4, "TMP/nC4"); + book(_nmu, "TMP/nmu"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : 
fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _nmu->fill(event.weight()); + _nmu->fill(); else { if(ntotal==3 && nCount[211] == 1 && nCount[-211]==1 && nCount[111]==1 ) { - _n3pi->fill(event.weight()); + _n3pi->fill(); } if(ntotal==4 && nCount[211] == 1 && nCount[-211]==1 && nCount[111]==2 ) { - _n4pi->fill(event.weight()); + _n4pi->fill(); } if(ntotal==5 && nCount[211] == 2 && nCount[-211]==2 && nCount[111]==1 ) { - _n5pi->fill(event.weight()); + _n5pi->fill(); } if(ntotal==6 && nCount[211] == 2 && nCount[-211]==2 && nCount[111]==2 ) { - _n6pi->fill(event.weight()); + _n6pi->fill(); } if(nCount[211] == 1 && nCount[-211]==1 && ntotal == 2+nCount[111]) { - _nC2->fill(event.weight()); + _nC2->fill(); } if(nCount[211] == 2 && nCount[-211]==2 && ntotal == 4+nCount[111]) { - _nC4->fill(event.weight()); + _nC4->fill(); } if((nCount[211]+nCount[-211]+nCount[111])==ntotal ) { if(ntotal==3 || ntotal ==5) - _n35pi->fill(event.weight()); + _n35pi->fill(); else if(ntotal==4 || ntotal==6) - _n46pi ->fill(event.weight()); + _n46pi ->fill(); } } } /// Normalise histograms etc., after the run void finalize() { double fact = crossSection()/ sumOfWeights() /nanobarn; for(unsigned int ix=1;ix<7;++ix) { double sigma,error; if(ix==1) { sigma = _n3pi->val()*fact; error = _n3pi->err()*fact; } else if(ix==2) { sigma = _n4pi->val()*fact; error = _n4pi->err()*fact; } else if(ix==3) { sigma = _n5pi->val()*fact; error = _n5pi->err()*fact; } else if(ix==4) { sigma = _n6pi->val()*fact; error = _n6pi->err()*fact; } else if(ix==5) { sigma = _n35pi->val()*fact; error = _n35pi->err()*fact; } else if(ix==6) { sigma = _n46pi->val()*fact; error = _n46pi->err()*fact; } Scatter2D temphisto(refData(1, 1, ix)); - Scatter2DPtr mult = bookScatter2D(1, 1, ix); + Scatter2DPtr mult; + book(mult, 1, 1, ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = 
temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } for(unsigned int ix=1;ix<3;++ix) { Scatter1D R = (ix==1? *_nC2 : *_nC4)/ *_nmu; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double sig_h = (ix ==1 ? _nC2 : _nC4)->val()*fact; double err_h = (ix ==1 ? _nC2 : _nC4)->err()*fact; double sig_m = _nmu->val()*fact; double err_m = _nmu->err()*fact; Scatter2D temphisto(refData(2, 1, ix)); - ostringstream title; + std::ostringstream title; if(ix==1) title << "sigma_2pi"; else title << "sigma_4pi"; - Scatter2DPtr hadrons = bookScatter2D(title.str()); + Scatter2DPtr hadrons; + book(hadrons, title.str()); Scatter2DPtr muons; - if(ix==1) muons = bookScatter2D("sigma_muons"); - Scatter2DPtr mult = bookScatter2D(2,1,ix); + if(ix==1) book(muons, "sigma_muons"); + Scatter2DPtr mult; + book(mult, 2,1,ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.)
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); if(ix==1) muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); if(ix==1) muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _n3pi,_n4pi,_n5pi,_n6pi,_n35pi,_n46pi,_nC2,_nC4,_nmu; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(GAMMAGAMMA_1981_I158474); } diff --git a/analyses/pluginFrascati/KLOE_2005_I655225.cc b/analyses/pluginFrascati/KLOE_2005_I655225.cc --- a/analyses/pluginFrascati/KLOE_2005_I655225.cc +++ b/analyses/pluginFrascati/KLOE_2005_I655225.cc @@ -1,79 +1,80 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class KLOE_2005_I655225 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(KLOE_2005_I655225); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _npion = bookCounter("TMP/pion"); + book(_npion, "TMP/pion"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); if(fs.particles().size()!=2) vetoEvent; - foreach (const Particle& p, fs.particles()) { - if(abs(p.pdgId())!=PID::PIPLUS) vetoEvent; + for (const Particle& p : fs.particles()) { + if(abs(p.pid())!=PID::PIPLUS) vetoEvent; } - _npion->fill(event.weight()); + _npion->fill(); } /// Normalise histograms etc., after the run void finalize() { double sigma = _npion->val(); double error = _npion->err(); sigma *= crossSection()/ sumOfWeights() /nanobarn; error *= crossSection()/ sumOfWeights() /nanobarn; Scatter2D temphisto(refData(2, 1, 1)); - 
Scatter2DPtr mult = bookScatter2D(2, 1, 1); + Scatter2DPtr mult; + book(mult, 2, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqr(sqrtS()/GeV), x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _npion; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(KLOE_2005_I655225); } diff --git a/analyses/pluginFrascati/KLOE_2008_I791841.cc b/analyses/pluginFrascati/KLOE_2008_I791841.cc --- a/analyses/pluginFrascati/KLOE_2008_I791841.cc +++ b/analyses/pluginFrascati/KLOE_2008_I791841.cc @@ -1,95 +1,96 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class KLOE_2008_I791841 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(KLOE_2008_I791841); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); - _n4pi = bookCounter("TMP/4pi"); - _n2pigamma = bookCounter("TMP/2pigamma"); + book(_n4pi, "TMP/4pi"); + book(_n2pigamma, "TMP/2pigamma"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } if(nCount[111]==2) { if( nCount[211] == 1 && nCount[-211] == 1 ) - _n4pi->fill(event.weight()); + _n4pi->fill(); else if( nCount[22] == 1) - _n2pigamma->fill(event.weight()); + _n2pigamma->fill(); } } /// Normalise histograms etc., after the run void finalize() { 
for(unsigned int ix=1;ix<3;++ix) { double sigma = 0., error = 0.; if(ix==1) { sigma = _n4pi->val(); error = _n4pi->err(); } else if(ix==2) { sigma = _n2pigamma->val(); error = _n2pigamma->err(); } sigma *= crossSection()/ sumOfWeights() /nanobarn; error *= crossSection()/ sumOfWeights() /nanobarn; Scatter2D temphisto(refData(ix, 1, 1)); - Scatter2DPtr mult = bookScatter2D(ix, 1, 1); + Scatter2DPtr mult; + book(mult, ix, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/MeV, x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} /// @name Histograms //@{ CounterPtr _n4pi,_n2pigamma; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(KLOE_2008_I791841); } diff --git a/analyses/pluginFrascati/KLOE_2009_I797438.cc b/analyses/pluginFrascati/KLOE_2009_I797438.cc --- a/analyses/pluginFrascati/KLOE_2009_I797438.cc +++ b/analyses/pluginFrascati/KLOE_2009_I797438.cc @@ -1,82 +1,83 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class KLOE_2009_I797438 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(KLOE_2009_I797438); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _npion = bookCounter("TMP/pion"); + book(_npion, "TMP/pion"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); if(fs.particles().size()!=2) vetoEvent; - foreach (const Particle& p, fs.particles()) { - if(abs(p.pdgId())!=PID::PIPLUS) vetoEvent; + for (const Particle& p : 
fs.particles()) { + if(abs(p.pid())!=PID::PIPLUS) vetoEvent; } - _npion->fill(event.weight()); + _npion->fill(); } /// Normalise histograms etc., after the run void finalize() { double sigma = _npion->val(); double error = _npion->err(); sigma *= crossSection()/ sumOfWeights() /nanobarn; error *= crossSection()/ sumOfWeights() /nanobarn; Scatter2D temphisto(refData(2, 1, 1)); - Scatter2DPtr mult = bookScatter2D(2, 1, 1); + Scatter2DPtr mult; + book(mult, 2, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqr(sqrtS()/GeV), x-ex2.first, x+ex2.second)) { mult->addPoint(x, sigma, ex, make_pair(error,error)); } else { mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _npion; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(KLOE_2009_I797438); } diff --git a/analyses/pluginFrascati/KLOE_2009_I818106.cc b/analyses/pluginFrascati/KLOE_2009_I818106.cc --- a/analyses/pluginFrascati/KLOE_2009_I818106.cc +++ b/analyses/pluginFrascati/KLOE_2009_I818106.cc @@ -1,98 +1,98 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class KLOE_2009_I818106 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(KLOE_2009_I818106); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(UnstableParticles(), "UFS"); - _h_etapi = bookHisto1D( 1, 1, 1); - _nPhi=0.; + book(_h_etapi, 1, 1, 1); + book(_nPhi, "TMP/PhiCounter"); } void findDecayProducts(const Particle & mother, unsigned int & nstable, unsigned int & neta, unsigned int & npi, unsigned int & ngamma, FourMomentum & ptot) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = 
p.pid(); if ( id == PID::ETA ) { ++neta; ++nstable; ptot += p.momentum(); } else if (id == PID::PI0) { ++npi; ++nstable; ptot += p.momentum(); } else if (id == PID::GAMMA) { ++ngamma; ++nstable; } else if (id == PID::PIPLUS || id == PID::PIMINUS) { ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p, nstable, neta, npi, ngamma, ptot); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over phis for(const Particle& phi : apply(event, "UFS").particles(Cuts::abspid==PID::PHI)) { - _nPhi+=event.weight(); + _nPhi->fill(); unsigned int nstable(0),neta(0),npi(0),ngamma(0); FourMomentum p_tot(0,0,0,0); findDecayProducts(phi, nstable, neta, npi, ngamma, p_tot); if(nstable!=3) continue; if(neta==1 && npi==1 && ngamma==1 ) { - _h_etapi->fill(p_tot.mass()/MeV, event.weight()); + _h_etapi->fill(p_tot.mass()/MeV); } } } /// Normalise histograms etc., after the run void finalize() { // normalise to total no of phi mesons // and mult by 10^7 due normalisation in paper - scale( _h_etapi, 1./_nPhi*1e7); + scale( _h_etapi, 1./_nPhi->sumW()*1e7); } //@} /// @name Histograms //@{ Histo1DPtr _h_etapi; - double _nPhi; + CounterPtr _nPhi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(KLOE_2009_I818106); } diff --git a/analyses/pluginFrascati/MUPI_1972_I84978.cc b/analyses/pluginFrascati/MUPI_1972_I84978.cc --- a/analyses/pluginFrascati/MUPI_1972_I84978.cc +++ b/analyses/pluginFrascati/MUPI_1972_I84978.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class MUPI_1972_I84978 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MUPI_1972_I84978); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = 
bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ *_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1, 1, 1); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(2, 1, 1); + Scatter2DPtr hadrons; + book(hadrons, 1, 1, 1); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 2, 1, 1); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) 
ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MUPI_1972_I84978); } diff --git a/analyses/pluginFrascati/MUPI_1973_I95215.cc b/analyses/pluginFrascati/MUPI_1973_I95215.cc --- a/analyses/pluginFrascati/MUPI_1973_I95215.cc +++ b/analyses/pluginFrascati/MUPI_1973_I95215.cc @@ -1,99 +1,102 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" namespace Rivet { /// @brief Add a short analysis description here class MUPI_1973_I95215 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MUPI_1973_I95215); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(FinalState(), "FS"); // Book histograms - _c_hadrons = bookCounter("/TMP/sigma_hadrons"); - _c_muons = bookCounter("/TMP/sigma_muons"); + book(_c_hadrons, "/TMP/sigma_hadrons"); + book(_c_muons, "/TMP/sigma_muons"); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState& fs = apply(event, "FS"); map nCount; int ntotal(0); - foreach (const Particle& p, fs.particles()) { - nCount[p.pdgId()] += 1; + for (const Particle& p : fs.particles()) { + nCount[p.pid()] += 1; ++ntotal; } // mu+mu- + photons if(nCount[-13]==1 and nCount[13]==1 && ntotal==2+nCount[22]) - _c_muons->fill(event.weight()); + _c_muons->fill(); // everything else else - _c_hadrons->fill(event.weight()); + _c_hadrons->fill(); } /// Normalise histograms etc., after the run void finalize() { Scatter1D R = *_c_hadrons/ 
*_c_muons; double rval = R.point(0).x(); pair rerr = R.point(0).xErrs(); double fact = crossSection()/ sumOfWeights() /nanobarn; double sig_h = _c_hadrons->val()*fact; double err_h = _c_hadrons->err()*fact; double sig_m = _c_muons ->val()*fact; double err_m = _c_muons ->err()*fact; Scatter2D temphisto(refData(1, 1, 1)); - Scatter2DPtr hadrons = bookScatter2D(1, 1, 1); - Scatter2DPtr muons = bookScatter2D("sigma_muons" ); - Scatter2DPtr mult = bookScatter2D(1, 1, 2); + Scatter2DPtr hadrons; + book(hadrons, 1, 1, 1); + Scatter2DPtr muons; + book(muons, "sigma_muons" ); + Scatter2DPtr mult; + book(mult, 1, 1, 2); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { mult ->addPoint(x, rval, ex, rerr); hadrons->addPoint(x, sig_h, ex, make_pair(err_h,err_h)); muons ->addPoint(x, sig_m, ex, make_pair(err_m,err_m)); } else { mult ->addPoint(x, 0., ex, make_pair(0.,.0)); hadrons->addPoint(x, 0., ex, make_pair(0.,.0)); muons ->addPoint(x, 0., ex, make_pair(0.,.0)); } } } //@} /// @name Histograms //@{ CounterPtr _c_hadrons, _c_muons; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MUPI_1973_I95215); } diff --git a/analyses/pluginHERA/H1_2007_I746380.cc b/analyses/pluginHERA/H1_2007_I746380.cc --- a/analyses/pluginHERA/H1_2007_I746380.cc +++ b/analyses/pluginHERA/H1_2007_I746380.cc @@ -1,545 +1,544 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/DISFinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { namespace H1_2007_I746380_PROJECTIONS { /// Projection to find the largest gaps and the masses of the two /// systems separated by the gap. 
Based on the HZTools gap-finding /// method (hzhadgap.F). Note that gaps are found in the HCM frame. /// /// @author Christine O. Rasmussen. class RapidityGap : public Projection { public: /// Type of DIS boost to apply enum Frame { HCM, LAB, XCM }; RapidityGap() { setName("RapidityGap"); - addProjection(DISKinematics(), "DISKIN"); - addProjection(DISFinalState(DISFinalState::HCM), "DISFS"); + declare(DISKinematics(), "DISKIN"); + declare(DISFinalState(DISFinalState::BoostFrame::HCM), "DISFS"); } DEFAULT_RIVET_PROJ_CLONE(RapidityGap); const double M2X() const {return _M2X;} const double M2Y() const {return _M2Y;} const double t() const {return _t;} const double gap() const {return _gap;} const double gapUpp() const {return _gapUpp;} const double gapLow() const {return _gapLow;} const double EpPzX(Frame f) const { if (f == LAB) return _ePpzX_LAB; else if (f == XCM) return _ePpzX_XCM; else return _ePpzX_HCM; } const double EmPzX(Frame f) const { if (f == LAB) return _eMpzX_LAB; else if (f == XCM) return _eMpzX_XCM; else return _eMpzX_HCM; } const FourMomentum pX(Frame f) const { if (f == LAB) return _momX_LAB; else if (f == XCM) return _momX_XCM; else return _momX_HCM; } const FourMomentum pY(Frame f) const { if (f == LAB) return _momY_LAB; else if (f == XCM) return _momY_XCM; else return _momY_HCM; } const Particles& systemX(Frame f) const { if (f == LAB) return _pX_LAB; else if (f == XCM) return _pX_XCM; else return _pX_HCM; } const Particles& systemY(Frame f) const { if (f == LAB) return _pY_LAB; else if (f == XCM) return _pY_XCM; else return _pY_HCM; } protected: - virtual int compare(const Projection& p) const { + virtual CmpState compare(const Projection& p) const { const RapidityGap& other = pcast(p); return mkNamedPCmp(other, "DISKIN") || mkNamedPCmp(other, "DISFS"); } virtual void project(const Event& e){ const DISKinematics& dk = apply(e, "DISKIN"); const Particles& p = apply(e, "DISFS").particles(cmpMomByEta); findgap(p, dk); } void clearAll(){ _M2X = _M2Y 
= _t = _gap = 0.; _gapUpp = _gapLow = -8.; _ePpzX_HCM = _eMpzX_HCM =_ePpzX_LAB = _eMpzX_LAB = _ePpzX_XCM = _eMpzX_XCM = 0.; _momX_HCM.setPE(0., 0., 0., 0.); _momY_HCM.setPE(0., 0., 0., 0.); _momX_XCM.setPE(0., 0., 0., 0.); _momY_XCM.setPE(0., 0., 0., 0.); _momX_LAB.setPE(0., 0., 0., 0.); _momY_LAB.setPE(0., 0., 0., 0.); _pX_HCM.clear(); _pY_HCM.clear(); _pX_XCM.clear(); _pY_XCM.clear(); _pX_LAB.clear(); _pY_LAB.clear(); } void findgap(const Particles& particles, const DISKinematics& diskin){ clearAll(); // Begin by finding largest gap and gapedges between all final // state particles in HCM frame. int nP = particles.size(); int dir = diskin.orientation(); for (int i = 0; i < nP-1; ++i){ double tmpGap = abs(particles[i+1].eta() - particles[i].eta()); if (tmpGap > _gap) { _gap = tmpGap; _gapLow = (dir > 0) ? particles[i].eta() : dir * particles[i+1].eta(); _gapUpp = (dir > 0) ? particles[i+1].eta() : dir * particles[i].eta(); } } // Define the two systems X and Y. Particles tmp_pX, tmp_pY; - foreach (const Particle& ip, particles) { + for (const Particle& ip : particles) { if (dir * ip.eta() > _gapLow) tmp_pX.push_back(ip); else tmp_pY.push_back(ip); } Particles pX, pY; pX = (dir < 0) ? tmp_pY : tmp_pX; pY = (dir < 0) ? tmp_pX : tmp_pY; // Find variables related to HCM frame. // Note that HCM has photon along +z, as opposed to // H1 where proton is along +z. This results in a sign change // as compared to H1 papers! 
// X - side FourMomentum momX; - foreach (const Particle& jp, pX) { + for (const Particle& jp : pX) { momX += jp.momentum(); _ePpzX_HCM += jp.E() - jp.pz(); // Sign + => - _eMpzX_HCM += jp.E() + jp.pz(); // Sign - => + } _momX_HCM = momX; _pX_HCM = pX; _M2X = _momX_HCM.mass2(); // Y - side FourMomentum momY; - foreach (const Particle& kp, pY) momY += kp.momentum(); + for (const Particle& kp : pY) momY += kp.momentum(); _momY_HCM = momY; _pY_HCM = pY; _M2Y = _momY_HCM.mass2(); // Find variables related to LAB frame const LorentzTransform hcmboost = diskin.boostHCM(); const LorentzTransform hcminverse = hcmboost.inverse(); _momX_LAB = hcminverse.transform(_momX_HCM); _momY_LAB = hcminverse.transform(_momY_HCM); // Find momenta in XCM frame. Note that it is HCM frame that is // boosted, resulting in a sign change later! const bool doXCM = (momX.betaVec().mod2() < 1.); if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(momX.betaVec()); _momX_XCM = xcmboost.transform(momX); _momY_XCM = xcmboost.transform(momY); } - foreach (const Particle& jp, pX) { + for (const Particle& jp : pX) { // Boost from HCM to LAB. FourMomentum lab = hcminverse.transform(jp.momentum()); _ePpzX_LAB += lab.E() + dir * lab.pz(); _eMpzX_LAB += lab.E() - dir * lab.pz(); Particle plab = jp; plab.setMomentum(lab); _pX_LAB.push_back(plab); // Set XCM. 
Note that since HCM frame is boosted to XCM frame, // we have a sign change if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(_momX_HCM.betaVec()); FourMomentum xcm = xcmboost.transform(jp.momentum()); _ePpzX_XCM += xcm.E() - xcm.pz(); // Sign + => - _eMpzX_XCM += xcm.E() + xcm.pz(); // Sign - => + Particle pxcm = jp; pxcm.setMomentum(xcm); _pX_XCM.push_back(pxcm); } } - foreach (const Particle& jp, pY) { + for (const Particle& jp : pY) { // Boost from HCM to LAB FourMomentum lab = hcminverse.transform(jp.momentum()); Particle plab = jp; plab.setMomentum(lab); _pY_LAB.push_back(plab); // Boost from HCM to XCM if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(_momX_HCM.betaVec()); FourMomentum xcm = xcmboost.transform(jp.momentum()); Particle pxcm = jp; pxcm.setMomentum(xcm); _pY_XCM.push_back(pxcm); } } // Find t: Currently can only handle gap on proton side. // @TODO: Expand to also handle gap on photon side // Boost p from LAB to HCM frame to find t. const FourMomentum proton = hcmboost.transform(diskin.beamHadron().momentum()); FourMomentum pPom = proton - _momY_HCM; _t = pPom * pPom; } private: double _M2X, _M2Y, _t; double _gap, _gapUpp, _gapLow; double _ePpzX_LAB, _eMpzX_LAB, _ePpzX_HCM, _eMpzX_HCM, _ePpzX_XCM, _eMpzX_XCM; FourMomentum _momX_HCM, _momY_HCM,_momX_LAB, _momY_LAB, _momX_XCM, _momY_XCM; Particles _pX_HCM, _pY_HCM, _pX_LAB, _pY_LAB, _pX_XCM, _pY_XCM; }; /// Projection to boost system X (photon+Pomeron) particles into its rest frame. /// /// @author Ilkka Helenius class BoostedXSystem : public FinalState { public: BoostedXSystem(const FinalState& fs) { setName("BoostedXSystem"); declare(fs,"FS"); - addProjection(RapidityGap(), "RAPGAP"); + declare(RapidityGap(), "RAPGAP"); } // Return the boost to XCM frame. const LorentzTransform& boost() const { return _boost; } DEFAULT_RIVET_PROJ_CLONE(BoostedXSystem); protected: // Apply the projection on the supplied event. 
void project(const Event& e){ const RapidityGap& rg = apply(e, "RAPGAP"); // Total momentum of the system X. const FourMomentum pX = rg.pX(RapidityGap::HCM); // Reset the boost. Is there a separate method for this? _boost = combine(_boost, _boost.inverse()); // Define boost only when numerically safe, otherwise negligible. if (pX.betaVec().mod2() < 1.) _boost = LorentzTransform::mkFrameTransformFromBeta(pX.betaVec()); // Boost the particles from system X. _theParticles.clear(); _theParticles.reserve(rg.systemX(RapidityGap::HCM).size()); for (const Particle& p : rg.systemX(RapidityGap::HCM)) { Particle temp = p; temp.setMomentum(_boost.transform(temp.momentum())); _theParticles.push_back(temp); } } // Compare projections. - int compare(const Projection& p) const { + CmpState compare(const Projection& p) const { const BoostedXSystem& other = pcast(p); return mkNamedPCmp(other, "RAPGAP") || mkNamedPCmp(other, "FS"); } private: LorentzTransform _boost; }; } /// @brief H1 diffractive dijets /// /// Diffractive dijets H1 with 920 GeV p and 27.5 GeV e /// Note tagged protons! /// /// @author Christine O. 
Rasmussen class H1_2007_I746380 : public Analysis { public: typedef H1_2007_I746380_PROJECTIONS::RapidityGap RapidityGap; typedef H1_2007_I746380_PROJECTIONS::BoostedXSystem BoostedXSystem; /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(H1_2007_I746380); /// @name Analysis methods //@{ // Book projections and histograms void init() { declare(DISKinematics(), "Kinematics"); - const DISFinalState& disfs = declare(DISFinalState(DISFinalState::HCM), "DISFS"); + const DISFinalState& disfs = declare(DISFinalState(DISFinalState::BoostFrame::HCM), "DISFS"); const BoostedXSystem& disfsXcm = declare( BoostedXSystem(disfs), "BoostedXFS"); declare(FastJets(disfsXcm, fastjet::JetAlgorithm::kt_algorithm, fastjet::RecombinationScheme::pt_scheme, 1.0, - JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES, nullptr), "DISFSJets"); + JetAlg::Muons::ALL, JetAlg::Invisibles::NONE, nullptr), "DISFSJets"); declare(RapidityGap(), "RapidityGap"); // Book histograms from REF data - _h_DIS_dsigdzPom = bookHisto1D(1, 1, 1); - _h_DIS_dsigdlogXpom = bookHisto1D(2, 1, 1); - _h_DIS_dsigdW = bookHisto1D(3, 1, 1); - _h_DIS_dsigdQ2 = bookHisto1D(4, 1, 1); - _h_DIS_dsigdEtJet1 = bookHisto1D(5, 1, 1); - _h_DIS_dsigdAvgEta = bookHisto1D(6, 1, 1); - _h_DIS_dsigdDeltaEta = bookHisto1D(7, 1, 1); + book(_h_DIS_dsigdzPom, 1, 1, 1); + book(_h_DIS_dsigdlogXpom, 2, 1, 1); + book(_h_DIS_dsigdW, 3, 1, 1); + book(_h_DIS_dsigdQ2, 4, 1, 1); + book(_h_DIS_dsigdEtJet1, 5, 1, 1); + book(_h_DIS_dsigdAvgEta, 6, 1, 1); + book(_h_DIS_dsigdDeltaEta, 7, 1, 1); - _h_PHO_dsigdzPom = bookHisto1D(8, 1, 1); - _h_PHO_dsigdxGam = bookHisto1D(9, 1, 1); - _h_PHO_dsigdlogXpom = bookHisto1D(10, 1, 1); - _h_PHO_dsigdW = bookHisto1D(11, 1, 1); - _h_PHO_dsigdEtJet1 = bookHisto1D(12, 1, 1); - _h_PHO_dsigdAvgEta = bookHisto1D(13, 1, 1); - _h_PHO_dsigdDeltaEta = bookHisto1D(14, 1, 1); - _h_PHO_dsigdMjets = bookHisto1D(15, 1, 1); + book(_h_PHO_dsigdzPom, 8, 1, 1); + book(_h_PHO_dsigdxGam, 9, 1, 1); + book(_h_PHO_dsigdlogXpom, 10, 1, 1); + 
book(_h_PHO_dsigdW, 11, 1, 1); + book(_h_PHO_dsigdEtJet1, 12, 1, 1); + book(_h_PHO_dsigdAvgEta, 13, 1, 1); + book(_h_PHO_dsigdDeltaEta, 14, 1, 1); + book(_h_PHO_dsigdMjets, 15, 1, 1); isDIS = false; nVeto0 = 0; nVeto1 = 0; nVeto2 = 0; nVeto3 = 0; nVeto4 = 0; nVeto5 = 0; nPHO = 0; nDIS = 0; } // Do the analysis void analyze(const Event& event) { // Event weight - const double weight = event.weight(); isDIS = false; // Projections - special handling of events where no proton found: const RapidityGap& rg = apply(event, "RapidityGap"); const DISKinematics& kin = apply(event, "Kinematics"); const BoostedXSystem& disfsXcm = apply( event, "BoostedXFS"); // Determine kinematics: H1 has +z = proton direction int dir = kin.orientation(); double W2 = kin.W2(); double W = sqrt(W2); double y = kin.y(); double Q2 = kin.Q2(); // Separate into DIS and PHO regimes else veto if (!inRange(W, 165.*GeV, 242.*GeV)) vetoEvent; if (Q2 < 0.01*GeV2) { isDIS = false; ++nPHO; } else if (inRange(Q2, 4.0*GeV2, 80.*GeV2)) { isDIS = true; ++nDIS; } else { vetoEvent; } ++nVeto0; // Find diffractive variables as defined in paper. const double M2Y = rg.M2Y(); const double M2X = rg.M2X(); const double abst = abs(rg.t()); const double xPom = (isDIS) ? (Q2 + M2X) / (Q2 + W2) : rg.EpPzX(RapidityGap::LAB) / (2. * kin.beamHadron().E()); // Veto if outside allowed region if (sqrt(M2Y) > 1.6*GeV) vetoEvent; ++nVeto1; if (abst > 1.0*GeV2) vetoEvent; ++nVeto2; if (xPom > 0.03) vetoEvent; ++nVeto3; // Jet selection. Note jets are found in photon-proton (XCM) // frame, but eta cut is applied in lab frame! 
Cut jetcuts = Cuts::Et > 4.* GeV; Jets jets = apply(event, "DISFSJets").jets(jetcuts, cmpMomByEt); // Veto if not dijets and if Et_j1 < 5.0 if (jets.size() < 2) vetoEvent; if (jets[0].Et() < 5.*GeV) vetoEvent; ++nVeto4; // Find Et_jet1 and deltaEta* in XCM frame double EtJet1 = jets[0].Et() * GeV; double etaXCMJet1 = jets[0].eta(); double etaXCMJet2 = jets[1].eta(); double deltaEtaJets = abs(etaXCMJet1 - etaXCMJet2); // Transform from XCM to HCM const LorentzTransform xcmboost = disfsXcm.boost(); for (int i = 0; i < 2; ++i) jets[i].transformBy(xcmboost.inverse()); // Find mass of jets and EpPz, EmPz of jets FourMomentum momJets = jets[0].momentum() + jets[1].momentum(); double M2jets = momJets.mass2(); double EpPzJets = 0.; double EmPzJets = 0.; // DIS variables are found in XCM frame, so boost back again if (isDIS){ for (int i = 0; i < 2; ++i) jets[i].transformBy(xcmboost); } // Note sign change wrt. H1 because photon is in +z direction // Jets in HCM so no need to consider orientation. for (int i = 0; i < 2; ++i){ EpPzJets += jets[i].E() - jets[i].pz(); // Sign: + => - EmPzJets += jets[i].E() + jets[i].pz(); // Sign: - => + } // Transform the jets from HCM to LAB frame where eta cut is // applied for photoproduction. const LorentzTransform hcmboost = kin.boostHCM(); for (int i = 0; i < 2; ++i) jets[i].transformBy(hcmboost.inverse()); double etaLabJet1 = dir * jets[0].eta(); double etaLabJet2 = dir * jets[1].eta(); double etaMin = (isDIS) ? -3. : -1.; double etaMax = (isDIS) ? 0. : 2.; double eta1 = (isDIS) ? etaXCMJet1 : etaLabJet1; double eta2 = (isDIS) ? etaXCMJet2 : etaLabJet2; if (!inRange(eta1, etaMin, etaMax)) vetoEvent; if (!inRange(eta2, etaMin, etaMax)) vetoEvent; ++nVeto5; // Pseudorapidity distributions are examined in lab frame: double avgEtaJets = 0.5 * (etaLabJet1 + etaLabJet2); // Derive xPom and xGam values from the jet kinematics. 
double zPomJets, xGamJets; if (isDIS) { zPomJets = (Q2 + M2jets) / (Q2 + M2X); xGamJets = EmPzJets / rg.EmPzX(RapidityGap::XCM); } else { // Boost E_p, E_e to HCM frame FourMomentum lep = hcmboost.transform(kin.beamLepton().momentum()); FourMomentum had = hcmboost.transform(kin.beamHadron().momentum()); zPomJets = EpPzJets / (2. * xPom * had.E()); xGamJets = EmPzJets / (2. * y * lep.E()); } // Now fill histograms if (isDIS){ - _h_DIS_dsigdzPom ->fill(zPomJets, weight); - _h_DIS_dsigdlogXpom ->fill(log10(xPom), weight); - _h_DIS_dsigdW ->fill(W, weight); - _h_DIS_dsigdQ2 ->fill(Q2, weight); - _h_DIS_dsigdEtJet1 ->fill(EtJet1, weight); - _h_DIS_dsigdAvgEta ->fill(avgEtaJets, weight); - _h_DIS_dsigdDeltaEta ->fill(deltaEtaJets, weight); + _h_DIS_dsigdzPom ->fill(zPomJets); + _h_DIS_dsigdlogXpom ->fill(log10(xPom)); + _h_DIS_dsigdW ->fill(W); + _h_DIS_dsigdQ2 ->fill(Q2); + _h_DIS_dsigdEtJet1 ->fill(EtJet1); + _h_DIS_dsigdAvgEta ->fill(avgEtaJets); + _h_DIS_dsigdDeltaEta ->fill(deltaEtaJets); } else { - _h_PHO_dsigdzPom ->fill(zPomJets, weight); - _h_PHO_dsigdxGam ->fill(xGamJets, weight); - _h_PHO_dsigdlogXpom ->fill(log10(xPom), weight); - _h_PHO_dsigdW ->fill(W, weight); - _h_PHO_dsigdEtJet1 ->fill(EtJet1, weight); - _h_PHO_dsigdAvgEta ->fill(avgEtaJets, weight); - _h_PHO_dsigdDeltaEta ->fill(deltaEtaJets, weight); - _h_PHO_dsigdMjets ->fill(sqrt(M2jets), weight); + _h_PHO_dsigdzPom ->fill(zPomJets); + _h_PHO_dsigdxGam ->fill(xGamJets); + _h_PHO_dsigdlogXpom ->fill(log10(xPom)); + _h_PHO_dsigdW ->fill(W); + _h_PHO_dsigdEtJet1 ->fill(EtJet1); + _h_PHO_dsigdAvgEta ->fill(avgEtaJets); + _h_PHO_dsigdDeltaEta ->fill(deltaEtaJets); + _h_PHO_dsigdMjets ->fill(sqrt(M2jets)); } } // Finalize void finalize() { // Normalise to cross section const double norm = crossSection()/picobarn/sumOfWeights(); scale( _h_DIS_dsigdzPom , norm); scale( _h_DIS_dsigdlogXpom , norm); scale( _h_DIS_dsigdW , norm); scale( _h_DIS_dsigdQ2 , norm); scale( _h_DIS_dsigdEtJet1 , norm); scale( 
_h_DIS_dsigdAvgEta , norm); scale( _h_DIS_dsigdDeltaEta, norm); scale( _h_PHO_dsigdzPom , norm); scale( _h_PHO_dsigdxGam , norm); scale( _h_PHO_dsigdlogXpom , norm); scale( _h_PHO_dsigdW , norm); scale( _h_PHO_dsigdEtJet1 , norm); scale( _h_PHO_dsigdAvgEta , norm); scale( _h_PHO_dsigdDeltaEta, norm); scale( _h_PHO_dsigdMjets , norm); const double dPHO = nPHO; MSG_INFO("H1_2007_I746380"); MSG_INFO("Cross section = " << crossSection()/picobarn << " pb"); MSG_INFO("Number of events = " << numEvents() << ", sumW = " << sumOfWeights()); MSG_INFO("Number of PHO = " << nPHO << ", number of DIS = " << nDIS); MSG_INFO("Events passing electron veto = " << nVeto0 << " (" << nVeto0/dPHO * 100. << "%)" ); MSG_INFO("Events passing MY = " << nVeto1 << " (" << nVeto1/dPHO * 100. << "%)" ); MSG_INFO("Events passing t veto = " << nVeto2 << " (" << nVeto2/dPHO * 100. << "%)" ); MSG_INFO("Events passing xPom = " << nVeto3 << " (" << nVeto3/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet Et veto = " << nVeto4 << " (" << nVeto4/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet eta veto = " << nVeto5 << " (" << nVeto5/dPHO * 100. 
<< "%)" ); } //@} private: /// @name Histograms //@{ // Book histograms from REF data Histo1DPtr _h_DIS_dsigdzPom ; Histo1DPtr _h_DIS_dsigdlogXpom ; Histo1DPtr _h_DIS_dsigdW ; Histo1DPtr _h_DIS_dsigdQ2 ; Histo1DPtr _h_DIS_dsigdEtJet1 ; Histo1DPtr _h_DIS_dsigdAvgEta ; Histo1DPtr _h_DIS_dsigdDeltaEta; Histo1DPtr _h_PHO_dsigdzPom ; Histo1DPtr _h_PHO_dsigdxGam ; Histo1DPtr _h_PHO_dsigdlogXpom ; Histo1DPtr _h_PHO_dsigdW ; Histo1DPtr _h_PHO_dsigdEtJet1 ; Histo1DPtr _h_PHO_dsigdAvgEta ; Histo1DPtr _h_PHO_dsigdDeltaEta; Histo1DPtr _h_PHO_dsigdMjets ; //@} bool isDIS; int nVeto0, nVeto1, nVeto2, nVeto3, nVeto4, nVeto5; int nPHO, nDIS; }; DECLARE_RIVET_PLUGIN(H1_2007_I746380); } diff --git a/analyses/pluginHERA/H1_2015_I1343110.cc b/analyses/pluginHERA/H1_2015_I1343110.cc --- a/analyses/pluginHERA/H1_2015_I1343110.cc +++ b/analyses/pluginHERA/H1_2015_I1343110.cc @@ -1,595 +1,594 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/DISFinalState.hh" #include "Rivet/Projections/DISDiffHadron.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { namespace H1_2015_I1343110_PROJECTIONS { /// Projection to find the largest gaps and the masses of the two /// systems separated by the gap. Based on the HZTools gap-finding /// method (hzhadgap.F). Note that gaps are found in the HCM frame. /// /// @author Christine O. Rasmussen. 
class RapidityGap : public Projection { public: /// Type of DIS boost to apply enum Frame { HCM, LAB, XCM }; RapidityGap() { setName("RapidityGap"); - addProjection(DISKinematics(), "DISKIN"); - addProjection(DISFinalState(DISFinalState::HCM), "DISFS"); + declare(DISKinematics(), "DISKIN"); + declare(DISFinalState(DISFinalState::BoostFrame::HCM), "DISFS"); } DEFAULT_RIVET_PROJ_CLONE(RapidityGap); const double M2X() const {return _M2X;} const double M2Y() const {return _M2Y;} const double t() const {return _t;} const double gap() const {return _gap;} const double gapUpp() const {return _gapUpp;} const double gapLow() const {return _gapLow;} const double EpPzX(Frame f) const { if (f == LAB) return _ePpzX_LAB; else if (f == XCM) return _ePpzX_XCM; else return _ePpzX_HCM; } const double EmPzX(Frame f) const { if (f == LAB) return _eMpzX_LAB; else if (f == XCM) return _eMpzX_XCM; else return _eMpzX_HCM; } const FourMomentum pX(Frame f) const { if (f == LAB) return _momX_LAB; else if (f == XCM) return _momX_XCM; else return _momX_HCM; } const FourMomentum pY(Frame f) const { if (f == LAB) return _momY_LAB; else if (f == XCM) return _momY_XCM; else return _momY_HCM; } const Particles& systemX(Frame f) const { if (f == LAB) return _pX_LAB; else if (f == XCM) return _pX_XCM; else return _pX_HCM; } const Particles& systemY(Frame f) const { if (f == LAB) return _pY_LAB; else if (f == XCM) return _pY_XCM; else return _pY_HCM; } protected: - virtual int compare(const Projection& p) const { + virtual CmpState compare(const Projection& p) const { const RapidityGap& other = pcast(p); return mkNamedPCmp(other, "DISKIN") || mkNamedPCmp(other, "DISFS"); } virtual void project(const Event& e){ const DISKinematics& dk = apply(e, "DISKIN"); const Particles& p = apply(e, "DISFS").particles(cmpMomByEta); findgap(p, dk); } void clearAll(){ _M2X = _M2Y = _t = _gap = 0.; _gapUpp = _gapLow = -8.; _ePpzX_HCM = _eMpzX_HCM =_ePpzX_LAB = _eMpzX_LAB = _ePpzX_XCM = _eMpzX_XCM = 0.; 
_momX_HCM.setPE(0., 0., 0., 0.); _momY_HCM.setPE(0., 0., 0., 0.); _momX_XCM.setPE(0., 0., 0., 0.); _momY_XCM.setPE(0., 0., 0., 0.); _momX_LAB.setPE(0., 0., 0., 0.); _momY_LAB.setPE(0., 0., 0., 0.); _pX_HCM.clear(); _pY_HCM.clear(); _pX_XCM.clear(); _pY_XCM.clear(); _pX_LAB.clear(); _pY_LAB.clear(); } void findgap(const Particles& particles, const DISKinematics& diskin){ clearAll(); // Begin by finding largest gap and gapedges between all final // state particles in HCM frame. int nP = particles.size(); int dir = diskin.orientation(); for (int i = 0; i < nP-1; ++i){ double tmpGap = abs(particles[i+1].eta() - particles[i].eta()); if (tmpGap > _gap) { _gap = tmpGap; _gapLow = (dir > 0) ? particles[i].eta() : dir * particles[i+1].eta(); _gapUpp = (dir > 0) ? particles[i+1].eta() : dir * particles[i].eta(); } } // Define the two systems X and Y. Particles tmp_pX, tmp_pY; - foreach (const Particle& ip, particles) { + for (const Particle& ip : particles) { if (dir * ip.eta() > _gapLow) tmp_pX.push_back(ip); else tmp_pY.push_back(ip); } Particles pX, pY; pX = (dir < 0) ? tmp_pY : tmp_pX; pY = (dir < 0) ? tmp_pX : tmp_pY; // Find variables related to HCM frame. // Note that HCM has photon along +z, as opposed to // H1 where proton is along +z. This results in a sign change // as compared to H1 papers! 
// X - side FourMomentum momX; - foreach (const Particle& jp, pX) { + for (const Particle& jp : pX) { momX += jp.momentum(); _ePpzX_HCM += jp.E() - jp.pz(); // Sign + => - _eMpzX_HCM += jp.E() + jp.pz(); // Sign - => + } _momX_HCM = momX; _pX_HCM = pX; _M2X = _momX_HCM.mass2(); // Y - side FourMomentum momY; - foreach (const Particle& kp, pY) momY += kp.momentum(); + for (const Particle& kp : pY) momY += kp.momentum(); _momY_HCM = momY; _pY_HCM = pY; _M2Y = _momY_HCM.mass2(); // Find variables related to LAB frame const LorentzTransform hcmboost = diskin.boostHCM(); const LorentzTransform hcminverse = hcmboost.inverse(); _momX_LAB = hcminverse.transform(_momX_HCM); _momY_LAB = hcminverse.transform(_momY_HCM); // Find momenta in XCM frame. Note that it is HCM frame that is // boosted, resulting in a sign change later! const bool doXCM = (momX.betaVec().mod2() < 1.); if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(momX.betaVec()); _momX_XCM = xcmboost.transform(momX); _momY_XCM = xcmboost.transform(momY); } - foreach (const Particle& jp, pX) { + for (const Particle& jp : pX) { // Boost from HCM to LAB. FourMomentum lab = hcminverse.transform(jp.momentum()); _ePpzX_LAB += lab.E() + dir * lab.pz(); _eMpzX_LAB += lab.E() - dir * lab.pz(); Particle plab = jp; plab.setMomentum(lab); _pX_LAB.push_back(plab); // Set XCM. 
Note that since HCM frame is boosted to XCM frame, // we have a sign change if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(_momX_HCM.betaVec()); FourMomentum xcm = xcmboost.transform(jp.momentum()); _ePpzX_XCM += xcm.E() - xcm.pz(); // Sign + => - _eMpzX_XCM += xcm.E() + xcm.pz(); // Sign - => + Particle pxcm = jp; pxcm.setMomentum(xcm); _pX_XCM.push_back(pxcm); } } - foreach (const Particle& jp, pY) { + for (const Particle& jp : pY) { // Boost from HCM to LAB FourMomentum lab = hcminverse.transform(jp.momentum()); Particle plab = jp; plab.setMomentum(lab); _pY_LAB.push_back(plab); // Boost from HCM to XCM if (doXCM) { const LorentzTransform xcmboost = LorentzTransform::mkFrameTransformFromBeta(_momX_HCM.betaVec()); FourMomentum xcm = xcmboost.transform(jp.momentum()); Particle pxcm = jp; pxcm.setMomentum(xcm); _pY_XCM.push_back(pxcm); } } // Find t: Currently can only handle gap on proton side. // @TODO: Expand to also handle gap on photon side // Boost p from LAB to HCM frame to find t. const FourMomentum proton = hcmboost.transform(diskin.beamHadron().momentum()); FourMomentum pPom = proton - _momY_HCM; _t = pPom * pPom; } private: double _M2X, _M2Y, _t; double _gap, _gapUpp, _gapLow; double _ePpzX_LAB, _eMpzX_LAB, _ePpzX_HCM, _eMpzX_HCM, _ePpzX_XCM, _eMpzX_XCM; FourMomentum _momX_HCM, _momY_HCM,_momX_LAB, _momY_LAB, _momX_XCM, _momY_XCM; Particles _pX_HCM, _pY_HCM, _pX_LAB, _pY_LAB, _pX_XCM, _pY_XCM; }; /// Projection to boost system X (photon+Pomeron) particles into its rest frame. /// /// @author Ilkka Helenius class BoostedXSystem : public FinalState { public: BoostedXSystem(const FinalState& fs) { setName("BoostedXSystem"); declare(fs,"FS"); - addProjection(RapidityGap(), "RAPGAP"); + declare(RapidityGap(), "RAPGAP"); } // Return the boost to XCM frame. const LorentzTransform& boost() const { return _boost; } DEFAULT_RIVET_PROJ_CLONE(BoostedXSystem); protected: // Apply the projection on the supplied event. 
void project(const Event& e){ const RapidityGap& rg = apply(e, "RAPGAP"); // Total momentum of the system X. const FourMomentum pX = rg.pX(RapidityGap::HCM); // Reset the boost. Is there a separate method for this? _boost = combine(_boost, _boost.inverse()); // Define boost only when numerically safe, otherwise negligible. if (pX.betaVec().mod2() < 1.) _boost = LorentzTransform::mkFrameTransformFromBeta(pX.betaVec()); // Boost the particles from system X. _theParticles.clear(); _theParticles.reserve(rg.systemX(RapidityGap::HCM).size()); for (const Particle& p : rg.systemX(RapidityGap::HCM)) { Particle temp = p; temp.setMomentum(_boost.transform(temp.momentum())); _theParticles.push_back(temp); } } // Compare projections. - int compare(const Projection& p) const { + CmpState compare(const Projection& p) const { const BoostedXSystem& other = pcast(p); return mkNamedPCmp(other, "RAPGAP") || mkNamedPCmp(other, "FS"); } private: LorentzTransform _boost; }; } /// @brief H1 diffractive dijets /// /// Diffractive dijets H1 with 920 GeV p and 27.5 GeV e /// Tagged protons & jets found in gamma*p rest frame. /// /// @author Christine O. 
Rasmussen class H1_2015_I1343110 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(H1_2015_I1343110); typedef H1_2015_I1343110_PROJECTIONS::RapidityGap RapidityGap; typedef H1_2015_I1343110_PROJECTIONS::BoostedXSystem BoostedXSystem; /// @name Analysis methods //@{ // Book projections and histograms void init() { declare(DISKinematics(), "Kinematics"); - const DISFinalState& disfs = declare(DISFinalState(DISFinalState::HCM), "DISFS"); + const DISFinalState& disfs = declare(DISFinalState(DISFinalState::BoostFrame::HCM), "DISFS"); const BoostedXSystem& disfsXcm = declare( BoostedXSystem(disfs), "BoostedXFS"); declare(FastJets(disfsXcm, fastjet::JetAlgorithm::kt_algorithm, fastjet::RecombinationScheme::pt_scheme, 1.0, - JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES, nullptr), "DISFSJets"); + JetAlg::Muons::ALL, JetAlg::Invisibles::NONE, nullptr), "DISFSJets"); declare(DISDiffHadron(), "Hadron"); declare(RapidityGap(), "RapidityGap"); // Book histograms from REF data - _h_PHO_sig_sqrts = bookHisto1D(1, 1, 1); - _h_DIS_sig_sqrts = bookHisto1D(2, 1, 1); - _h_PHODIS_sqrts = bookScatter2D(3, 1, 1); + book(_h_PHO_sig_sqrts, 1, 1, 1); + book(_h_DIS_sig_sqrts, 2, 1, 1); + book(_h_PHODIS_sqrts, 3, 1, 1); - _h_DIS_dsigdz = bookHisto1D(4, 1, 1); - _h_DIS_dsigdxPom = bookHisto1D(5, 1, 1); - _h_DIS_dsigdy = bookHisto1D(6, 1, 1); - _h_DIS_dsigdQ2 = bookHisto1D(7, 1, 1); - _h_DIS_dsigdEtj1 = bookHisto1D(8, 1, 1); - _h_DIS_dsigdMX = bookHisto1D(9, 1, 1); - _h_DIS_dsigdDeltaEta = bookHisto1D(10, 1, 1); - _h_DIS_dsigdAvgEta = bookHisto1D(11, 1, 1); + book(_h_DIS_dsigdz, 4, 1, 1); + book(_h_DIS_dsigdxPom, 5, 1, 1); + book(_h_DIS_dsigdy, 6, 1, 1); + book(_h_DIS_dsigdQ2, 7, 1, 1); + book(_h_DIS_dsigdEtj1, 8, 1, 1); + book(_h_DIS_dsigdMX, 9, 1, 1); + book(_h_DIS_dsigdDeltaEta, 10, 1, 1); + book(_h_DIS_dsigdAvgEta, 11, 1, 1); - _h_PHO_dsigdz = bookHisto1D(12, 1, 1); - _h_PHO_dsigdxPom = bookHisto1D(13, 1, 1); - _h_PHO_dsigdy = bookHisto1D(14, 1, 1); - _h_PHO_dsigdxGam = 
bookHisto1D(15, 1, 1); - _h_PHO_dsigdEtj1 = bookHisto1D(16, 1, 1); - _h_PHO_dsigdMX = bookHisto1D(17, 1, 1); - _h_PHO_dsigdDeltaEta = bookHisto1D(18, 1, 1); - _h_PHO_dsigdAvgEta = bookHisto1D(19, 1, 1); + book(_h_PHO_dsigdz, 12, 1, 1); + book(_h_PHO_dsigdxPom, 13, 1, 1); + book(_h_PHO_dsigdy, 14, 1, 1); + book(_h_PHO_dsigdxGam, 15, 1, 1); + book(_h_PHO_dsigdEtj1, 16, 1, 1); + book(_h_PHO_dsigdMX, 17, 1, 1); + book(_h_PHO_dsigdDeltaEta, 18, 1, 1); + book(_h_PHO_dsigdAvgEta, 19, 1, 1); - _h_PHODIS_deltaEta = bookScatter2D(20, 1, 1); - _h_PHODIS_y = bookScatter2D(21, 1, 1); - _h_PHODIS_z = bookScatter2D(22, 1, 1); - _h_PHODIS_Etj1 = bookScatter2D(23, 1, 1); + book(_h_PHODIS_deltaEta, 20, 1, 1); + book(_h_PHODIS_y, 21, 1, 1); + book(_h_PHODIS_z, 22, 1, 1); + book(_h_PHODIS_Etj1, 23, 1, 1); isPHO = false; nVeto1 = 0; nVeto2 = 0; nVeto3 = 0; nVeto4 = 0; nVeto5 = 0; nVeto6 = 0; nPHO = 0; nDIS = 0; } // Do the analysis void analyze(const Event& event) { // Event weight - const double weight = event.weight(); isPHO = false; // Projections - special handling of events where no proton found: const RapidityGap& rg = apply(event, "RapidityGap"); const DISKinematics& kin = apply(event, "Kinematics"); const BoostedXSystem& disfsXcm = apply( event, "BoostedXFS"); Particle hadronOut; Particle hadronIn; try { const DISDiffHadron& diffhadr = apply(event, "Hadron"); hadronOut = diffhadr.out(); hadronIn = diffhadr.in(); } catch (const Error& e){ vetoEvent; } // Determine kinematics: H1 has +z = proton direction int dir = kin.orientation(); double y = kin.y(); double Q2 = kin.Q2(); // Separate into DIS and PHO regimes else veto if (Q2 < 2.*GeV2 && inRange(y, 0.2, 0.70)) { isPHO = true; ++nPHO; } else if (inRange(Q2, 4.0*GeV2, 80.*GeV2) && inRange(y, 0.2, 0.7)) { isPHO = false; ++nDIS; } else vetoEvent; ++nVeto1; // Find diffractive variables as defined in paper. 
// Note tagged protons in VFPS => smaller allowed xPom range // xPom = 1 - E'/E, M2X from hadrons, t = (P-P')^2 const double M2X = rg.M2X(); const double abst = abs(rg.t()); const double xPom = 1. - hadronOut.energy() / hadronIn.energy(); //cout << "\nhadout=" << hadronOut.energy() << ", hadin=" << hadronIn.energy() << endl; //cout << "xPomH1=" << (Q2+M2X) / (y * sqr(sqrtS())) << endl; //cout << "|t|=" << abst << ", xPom=" << xPom << endl; // Veto if outside allowed region if (abst > 0.6 * GeV2) vetoEvent; ++nVeto2; if (!inRange(xPom, 0.010, 0.024)) vetoEvent; ++nVeto3; // Jet selection. Note jets are found in XCM frame, but // eta cut is applied in lab frame! Cut jetcuts = Cuts::Et > 4.* GeV; Jets jets = apply(event, "DISFSJets").jets(jetcuts, cmpMomByEt); // Veto if not dijets and if Et_j1 < 5.5 if (jets.size() < 2) vetoEvent; if (jets[0].Et() < 5.5 * GeV) vetoEvent; ++nVeto4; // Find Et_jet1 in XCM frame double EtJet1 = jets[0].Et() * GeV; //cout << "gamma*p frame:" << endl; //cout << "Et1=" << jets[0].Et() << ", E1=" << jets[0].E() << ", pz1=" << jets[0].pz() << ", m1=" << jets[0].mass() << endl; //cout << "Et2=" << jets[1].Et() << ", E2=" << jets[1].E() << ", pz2=" << jets[1].pz() << ", m2=" << jets[1].mass() << endl; // Transform from XCM to HCM const LorentzTransform xcmboost = disfsXcm.boost(); for (int i = 0; i < 2; ++i) jets[i].transformBy(xcmboost.inverse()); // Find mass of jets and EpPz, EmPz of jets in HCM frame. FourMomentum momJets = jets[0].momentum() + jets[1].momentum(); double M2jets = momJets.mass2(); double EpPzJets = 0.; double EmPzJets = 0.; // Note sign change wrt. H1 because photon is in +z direction for (int i = 0; i < 2; ++i){ EpPzJets += jets[i].E() - jets[i].pz(); // Sign: + => - EmPzJets += jets[i].E() + jets[i].pz(); // Sign: - => + } // Transform the jets from HCM to LAB frame where eta cut is // applied for photoproduction. 
const LorentzTransform hcmboost = kin.boostHCM(); for (int i = 0; i < 2; ++i) jets[i].transformBy(hcmboost.inverse()); double etaLabJet1 = dir * jets[0].eta(); double etaLabJet2 = dir * jets[1].eta(); if (!inRange(etaLabJet1, -1., 2.5)) vetoEvent; if (!inRange(etaLabJet2, -1., 2.5)) vetoEvent; ++nVeto5; // Pseudorapidity distributions are examined in lab frame: double deltaEtaJets = abs(dir * jets[0].eta() - dir * jets[1].eta()); double avgEtaJets = 0.5 * (dir * jets[0].eta() + dir * jets[1].eta()); // Evaluate observables double zPomJets, xGamJets = 0.; if (isPHO){ zPomJets = EpPzJets / rg.EpPzX(RapidityGap::HCM); xGamJets = EmPzJets / rg.EmPzX(RapidityGap::HCM); //cout << "xGamJets=" << xGamJets << endl; } else { zPomJets = (Q2 + M2jets) / (Q2 + M2X); } //cout << "lab frame:" << endl; //cout << "Et1=" << jets[0].Et() << ", E1=" << jets[0].E() << ", pz1=" << jets[0].pz() << ", m1=" << jets[0].mass() << endl; //cout << "Et2=" << jets[1].Et() << ", E2=" << jets[1].E() << ", pz2=" << jets[1].pz() << ", m2=" << jets[1].mass() << endl; //cout << "EpPzJets=" << EpPzJets << ", EmPzJets=" << EmPzJets << endl; //cout << "Et*exp(eta)=" << jets[0].Et()*exp(etaLabJet1) + jets[1].Et()*exp(etaLabJet2) << endl; //cout << "Et*exp(-eta)=" << jets[0].Et()*exp(-etaLabJet1) + jets[1].Et()*exp(-etaLabJet2) << endl; //cout << "EpPz=" << rg.EpPzX(RapidityGap::HCM) << ", EmPz=" << rg.EmPzX(RapidityGap::HCM) << endl; //cout << "2 xPom Ep=" << 2. * xPom * kin.beamHadron().E() << ", 2 y Ee=" << 2. 
* y * kin.beamLepton().E() << endl; //cout << "xGam=" << xGamJets << ", zPom=" << zPomJets << endl; //cout << "M12=" << M2jets << ", deltaEta=" << deltaEtaJets << ", avgEta=" << avgEtaJets << endl; // Veto events with zPom > 0.8 if (zPomJets > 0.8) vetoEvent; ++nVeto6; // Now fill histograms if (isPHO){ - _h_PHO_sig_sqrts ->fill(sqrtS()/GeV, weight); - _h_PHO_dsigdz ->fill(zPomJets, weight); - _h_PHO_dsigdxPom ->fill(xPom, weight); - _h_PHO_dsigdy ->fill(y, weight); - _h_PHO_dsigdxGam ->fill(xGamJets, weight); - _h_PHO_dsigdEtj1 ->fill(EtJet1, weight); - _h_PHO_dsigdMX ->fill(sqrt(M2X)*GeV, weight); - _h_PHO_dsigdDeltaEta ->fill(deltaEtaJets, weight); - _h_PHO_dsigdAvgEta ->fill(avgEtaJets, weight); + _h_PHO_sig_sqrts ->fill(sqrtS()/GeV); + _h_PHO_dsigdz ->fill(zPomJets); + _h_PHO_dsigdxPom ->fill(xPom); + _h_PHO_dsigdy ->fill(y); + _h_PHO_dsigdxGam ->fill(xGamJets); + _h_PHO_dsigdEtj1 ->fill(EtJet1); + _h_PHO_dsigdMX ->fill(sqrt(M2X)/GeV); + _h_PHO_dsigdDeltaEta ->fill(deltaEtaJets); + _h_PHO_dsigdAvgEta ->fill(avgEtaJets); } else { - _h_DIS_sig_sqrts ->fill(sqrtS()/GeV, weight); - _h_DIS_dsigdz ->fill(zPomJets, weight); - _h_DIS_dsigdxPom ->fill(xPom, weight); - _h_DIS_dsigdy ->fill(y, weight); - _h_DIS_dsigdQ2 ->fill(Q2, weight); - _h_DIS_dsigdEtj1 ->fill(EtJet1, weight); - _h_DIS_dsigdMX ->fill(sqrt(M2X)*GeV, weight); - _h_DIS_dsigdDeltaEta ->fill(deltaEtaJets, weight); - _h_DIS_dsigdAvgEta ->fill(avgEtaJets, weight); + _h_DIS_sig_sqrts ->fill(sqrtS()/GeV); + _h_DIS_dsigdz ->fill(zPomJets); + _h_DIS_dsigdxPom ->fill(xPom); + _h_DIS_dsigdy ->fill(y); + _h_DIS_dsigdQ2 ->fill(Q2); + _h_DIS_dsigdEtj1 ->fill(EtJet1); + _h_DIS_dsigdMX ->fill(sqrt(M2X)/GeV); + _h_DIS_dsigdDeltaEta ->fill(deltaEtaJets); + _h_DIS_dsigdAvgEta ->fill(avgEtaJets); } } // Finalize void finalize() { // Normalise to cross section // Remember to manually scale the cross section afterwards with // the number of rejected events. 
const double norm = crossSection()/picobarn/sumOfWeights(); scale(_h_PHO_sig_sqrts, norm); scale(_h_PHO_dsigdz, norm); scale(_h_PHO_dsigdxPom, norm); scale(_h_PHO_dsigdy, norm); scale(_h_PHO_dsigdxGam, norm); scale(_h_PHO_dsigdEtj1, norm); scale(_h_PHO_dsigdMX, norm); scale(_h_PHO_dsigdDeltaEta, norm); scale(_h_PHO_dsigdAvgEta, norm); scale(_h_DIS_sig_sqrts, norm); scale(_h_DIS_dsigdz, norm); scale(_h_DIS_dsigdxPom, norm); scale(_h_DIS_dsigdy, norm); scale(_h_DIS_dsigdQ2, norm); scale(_h_DIS_dsigdEtj1, norm); scale(_h_DIS_dsigdMX, norm); scale(_h_DIS_dsigdDeltaEta, norm); scale(_h_DIS_dsigdAvgEta, norm); if (_h_DIS_sig_sqrts->numEntries() != 0) divide(_h_PHO_sig_sqrts, _h_DIS_sig_sqrts, _h_PHODIS_sqrts); if (_h_DIS_dsigdDeltaEta->numEntries() != 0) divide(_h_PHO_dsigdDeltaEta, _h_DIS_dsigdDeltaEta, _h_PHODIS_deltaEta); if (_h_DIS_dsigdy->numEntries() != 0) divide(_h_PHO_dsigdy, _h_DIS_dsigdy, _h_PHODIS_y); if (_h_DIS_dsigdz->numEntries() != 0) divide(_h_PHO_dsigdz, _h_DIS_dsigdz, _h_PHODIS_z); if (_h_DIS_dsigdEtj1->numEntries() != 0) divide(_h_PHO_dsigdEtj1, _h_DIS_dsigdEtj1, _h_PHODIS_Etj1); const double dPHO = nPHO; MSG_INFO("H1_2015_I1343110"); MSG_INFO("Cross section = " << crossSection()/picobarn << " pb"); MSG_INFO("Number of events = " << numEvents() << ", sumW = " << sumOfWeights()); MSG_INFO("Number of PHO = " << nPHO << ", number of DIS = " << nDIS); MSG_INFO("Events passing electron veto = " << nVeto1 << " (" << nVeto1/dPHO * 100. << "%)" ); MSG_INFO("Events passing t veto = " << nVeto2 << " (" << nVeto2/dPHO * 100. << "%)" ); MSG_INFO("Events passing xPom = " << nVeto3 << " (" << nVeto3/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet Et veto = " << nVeto4 << " (" << nVeto4/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet eta veto = " << nVeto5 << " (" << nVeto5/dPHO * 100. << "%)" ); MSG_INFO("Events passing zPom veto = " << nVeto6 << " (" << nVeto6/dPHO * 100. 
<< "%)" ); } //@} private: /// @name Histograms //@{ // Book histograms from REF data Histo1DPtr _h_PHO_sig_sqrts; Histo1DPtr _h_DIS_sig_sqrts; Scatter2DPtr _h_PHODIS_sqrts; Histo1DPtr _h_DIS_dsigdz; Histo1DPtr _h_DIS_dsigdxPom; Histo1DPtr _h_DIS_dsigdy; Histo1DPtr _h_DIS_dsigdQ2; Histo1DPtr _h_DIS_dsigdEtj1; Histo1DPtr _h_DIS_dsigdMX; Histo1DPtr _h_DIS_dsigdDeltaEta; Histo1DPtr _h_DIS_dsigdAvgEta; Histo1DPtr _h_PHO_dsigdz; Histo1DPtr _h_PHO_dsigdxPom; Histo1DPtr _h_PHO_dsigdy; Histo1DPtr _h_PHO_dsigdxGam; Histo1DPtr _h_PHO_dsigdEtj1; Histo1DPtr _h_PHO_dsigdMX; Histo1DPtr _h_PHO_dsigdDeltaEta; Histo1DPtr _h_PHO_dsigdAvgEta; Scatter2DPtr _h_PHODIS_deltaEta; Scatter2DPtr _h_PHODIS_y; Scatter2DPtr _h_PHODIS_z; Scatter2DPtr _h_PHODIS_Etj1; //@} bool isPHO; int nVeto1, nVeto2, nVeto3, nVeto4, nVeto5, nVeto6; int nPHO, nDIS; }; DECLARE_RIVET_PLUGIN(H1_2015_I1343110); } diff --git a/analyses/pluginHERA/HERA_2015_I1353667.cc b/analyses/pluginHERA/HERA_2015_I1353667.cc --- a/analyses/pluginHERA/HERA_2015_I1353667.cc +++ b/analyses/pluginHERA/HERA_2015_I1353667.cc @@ -1,99 +1,99 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Combined H1/ZEUS D* production cross-sections in DIS class HERA_2015_I1353667 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(HERA_2015_I1353667); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // declare(FinalState(Cuts::abseta < 5 && Cuts::pT > 100*MeV), "FS"); // FinalState fs; declare(DISKinematics(), "Kinematics"); declare(UnstableParticles(), "Dstars"); //Cuts::abspid == PID::DSTARPLUS // Book histograms - _h_pTD = bookHisto1D(1, 1, 1); - _h_etaD = bookHisto1D(2, 1, 1); - _h_zD = bookHisto1D(3, 1, 1); - _h_Q2 = bookHisto1D(4, 1, 1); - _h_y = bookHisto1D(5, 1, 
1); - // _h_Q2y = bookHisto2D(6, 1, 1); + book(_h_pTD, 1, 1, 1); + book(_h_etaD, 2, 1, 1); + book(_h_zD, 3, 1, 1); + book(_h_Q2, 4, 1, 1); + book(_h_y, 5, 1, 1); + book(_h_Q2y, 6, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Determine kinematics, including event orientation const DISKinematics& kin = apply(event, "Kinematics"); //const int orientation = kin.orientation(); // Q2 and inelasticity cuts if (!inRange(kin.Q2(), 1.5*GeV2, 1000*GeV2)) vetoEvent; if (!inRange(kin.y(), 0.02, 0.7)) vetoEvent; // D* reconstruction const Particles unstables = apply(event, "Dstars") .particles(Cuts::pT > 1.5*GeV && Cuts::abseta < 1.5); const Particles dstars = filter_select(unstables, [](const Particle& p){ return p.abspid() == PID::DSTARPLUS; }); if (dstars.empty()) vetoEvent; MSG_DEBUG("#D* = " << dstars.size()); const Particle& dstar = dstars.front(); const double zD = (dstar.E() - dstar.pz()) / (2*kin.beamLepton().E()*kin.y()); // Single-differential histograms with higher low-Q2 cut if (kin.Q2() > 5*GeV2) { - _h_pTD->fill(dstar.pT()/GeV, event.weight()); - _h_etaD->fill(dstar.eta(), event.weight()); - _h_zD->fill(zD/GeV, event.weight()); - _h_Q2->fill(kin.Q2()/GeV2, event.weight()); - _h_y->fill(kin.y(), event.weight()); + _h_pTD->fill(dstar.pT()/GeV); + _h_etaD->fill(dstar.eta()); + _h_zD->fill(zD/GeV); + _h_Q2->fill(kin.Q2()/GeV2); + _h_y->fill(kin.y()); } // // Double-differential (y,Q2) histograms - // _h_Q2y->fill(kin.Q2()/GeV2, kin.y(), event.weight()); + // _h_Q2y->fill(kin.Q2()/GeV2, kin.y()); } /// Normalise histograms etc., after the run void finalize() { scale({_h_pTD, _h_etaD, _h_zD, _h_Q2, _h_y}, crossSection()/nanobarn/sumOfWeights()); } //@} /// @name Histograms //@{ Histo1DPtr _h_pTD, _h_etaD, _h_zD, _h_Q2, _h_y; Histo2DPtr _h_Q2y; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(HERA_2015_I1353667); } diff --git a/analyses/pluginHERA/ZEUS_2008_I763404.cc b/analyses/pluginHERA/ZEUS_2008_I763404.cc --- 
a/analyses/pluginHERA/ZEUS_2008_I763404.cc +++ b/analyses/pluginHERA/ZEUS_2008_I763404.cc @@ -1,182 +1,181 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/DISDiffHadron.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief ZEUS dijet photoproduction study used in the ZEUS jets PDF fit /// /// This class is a reproduction of the HZTool routine for the ZEUS /// dijet photoproduction paper which was used in the ZEUS jets PDF fit. /// /// @author Ilkka Helenius class ZEUS_2008_I763404 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ZEUS_2008_I763404); /// @name Analysis methods //@{ // Book projections and histograms void init() { /// @todo Acceptance FinalState fs; // Final state particles with central tracking detector. declare(FastJets(fs, FastJets::KT, 1.0), "Jets"); // Projections declare(DISKinematics(), "Kinematics"); declare(DISDiffHadron(), "Hadron"); - _h_dsigma_all[0] = bookHisto1D(1, 1, 1); - _h_dsigma_all[1] = bookHisto1D(2, 1, 1); - _h_dsigma_all[2] = bookHisto1D(3, 1, 1); - _h_dsigma_all[3] = bookHisto1D(4, 1, 1); - _h_dsigma_all[4] = bookHisto1D(5, 1, 1); - _h_dsigma_all[5] = bookHisto1D(6, 1, 1); - _h_xgamma = bookHisto1D(7, 1, 1); - _h_dsigma_xgamma[0][0] = bookHisto1D(8, 1, 1); - _h_dsigma_xgamma[0][1] = bookHisto1D(9, 1, 1); - _h_dsigma_xgamma[0][2] = bookHisto1D(10, 1, 1); - _h_dsigma_xgamma[0][3] = bookHisto1D(11, 1, 1); - _h_dsigma_xgamma[1][0] = bookHisto1D(12, 1, 1); - _h_dsigma_xgamma[1][1] = bookHisto1D(13, 1, 1); - _h_dsigma_xgamma[1][2] = bookHisto1D(14, 1, 1); - _h_dsigma_xgamma[1][3] = bookHisto1D(15, 1, 1); + book(_h_dsigma_all[0], 1, 1, 1); + book(_h_dsigma_all[1], 2, 1, 1); + book(_h_dsigma_all[2], 3, 1, 1); + book(_h_dsigma_all[3], 4, 1, 1); + book(_h_dsigma_all[4], 5, 1, 1); + book(_h_dsigma_all[5], 6, 1, 1); + book(_h_xgamma, 7, 1, 1); + book(_h_dsigma_xgamma[0][0], 8, 1, 1); + 
book(_h_dsigma_xgamma[0][1], 9, 1, 1); + book(_h_dsigma_xgamma[0][2], 10, 1, 1); + book(_h_dsigma_xgamma[0][3], 11, 1, 1); + book(_h_dsigma_xgamma[1][0], 12, 1, 1); + book(_h_dsigma_xgamma[1][1], 13, 1, 1); + book(_h_dsigma_xgamma[1][2], 14, 1, 1); + book(_h_dsigma_xgamma[1][3], 15, 1, 1); nVeto0 = 0; nVeto1 = 0; nVeto2 = 0; nVeto3 = 0; nVeto4 = 0; } // Do the analysis void analyze(const Event& event) { // Derive the DIS kinematics. const DISKinematics& kin = apply(event, "Kinematics"); // Derive the diffractive kinematics (should be used for diffractive only). Particle hadronOut; Particle hadronIn; try { const DISDiffHadron & diffhadr = apply(event, "Hadron"); hadronOut = diffhadr.out(); hadronIn = diffhadr.in(); } catch (const Error& e){ vetoEvent; } // Determine event orientation, since coord system is for +z = proton direction const int orientation = kin.orientation(); // Calculate the photon 4-momentum from the incoming and outgoing lepton. const FourMomentum qleptonIn = kin.beamLepton().momentum(); const FourMomentum qleptonOut = kin.scatteredLepton().momentum(); const FourMomentum qphoton = qleptonIn - qleptonOut; // Calculate the Pomeron 4-momentum from the incoming and outgoing hadron const FourMomentum pHadOut = hadronOut.momentum(); const FourMomentum pHadIn = hadronIn.momentum(); const FourMomentum pPomeron = pHadIn - pHadOut; // Q2 and inelasticity cuts if (kin.Q2() > 1*GeV2) vetoEvent; ++nVeto0; if (!inRange(kin.y(), 0.2, 0.85)) vetoEvent; ++nVeto1; // Jet selection and veto. const Jets jets = apply(event, "Jets") \ .jets(Cuts::Et > 6.5*GeV && Cuts::etaIn(-1.5*orientation, 1.5*orientation), cmpMomByEt); MSG_DEBUG("Jet multiplicity = " << jets.size()); if (jets.size() < 2) vetoEvent; ++nVeto2; const Jet& j1 = jets[0]; const Jet& j2 = jets[1]; if (j1.Et() < 7.5*GeV) vetoEvent; ++nVeto3; // Veto on x_Pomeron. const double xPom = ( pPomeron * qphoton ) / (pHadIn * qphoton); if (xPom > 0.025) vetoEvent; ++nVeto4; // Computation of event-level variables. 
const double eta1 = orientation*j1.eta(), eta2 = orientation*j2.eta(); const double xyobs = (j1.Et() * exp(-eta1) + j2.Et() * exp(-eta2)) / (2*kin.y()*kin.beamLepton().E()); const size_t i_xyobs = (xyobs < 0.75) ? 1 : 0; const double zPobs = (j1.Et() * exp(eta1) + j2.Et() * exp(eta2)) / (2*xPom*kin.beamHadron().E()); const double M_X = sqrt( (pPomeron + qphoton).mass2() ); // Fill histograms - const double weight = event.weight(); - _h_dsigma_all[0]->fill(kin.y(), weight); - _h_dsigma_all[1]->fill(M_X, weight); - _h_dsigma_all[2]->fill(xPom, weight); - _h_dsigma_all[3]->fill(zPobs, weight); - _h_dsigma_all[4]->fill(j1.Et(), weight); - _h_dsigma_all[5]->fill(eta1, weight); + _h_dsigma_all[0]->fill(kin.y()); + _h_dsigma_all[1]->fill(M_X); + _h_dsigma_all[2]->fill(xPom); + _h_dsigma_all[3]->fill(zPobs); + _h_dsigma_all[4]->fill(j1.Et()); + _h_dsigma_all[5]->fill(eta1); - _h_xgamma->fill(xyobs, weight); + _h_xgamma->fill(xyobs); - _h_dsigma_xgamma[i_xyobs][0]->fill(kin.y(), weight); - _h_dsigma_xgamma[i_xyobs][1]->fill(M_X, weight); - _h_dsigma_xgamma[i_xyobs][2]->fill(xPom, weight); - _h_dsigma_xgamma[i_xyobs][3]->fill(zPobs, weight); + _h_dsigma_xgamma[i_xyobs][0]->fill(kin.y()); + _h_dsigma_xgamma[i_xyobs][1]->fill(M_X); + _h_dsigma_xgamma[i_xyobs][2]->fill(xPom); + _h_dsigma_xgamma[i_xyobs][3]->fill(zPobs); } // Finalize void finalize() { const double norm = crossSection()/picobarn/sumOfWeights(); scale(_h_xgamma, norm); for (auto& h : _h_dsigma_all) scale(h, norm); for (auto& h : _h_dsigma_xgamma[0]) scale(h, norm); for (auto& h : _h_dsigma_xgamma[1]) scale(h, norm); // Cross section in nb for these observables. 
scale(_h_dsigma_all[2], 1e-3); scale(_h_dsigma_xgamma[0][2], 1e-3); scale(_h_dsigma_xgamma[1][2], 1e-3); double dPHO = nVeto1; MSG_INFO("ZEUS_2008_I763403"); MSG_INFO("Cross section = " << crossSection()/picobarn); MSG_INFO("Number of events = " << numEvents() << ", sumW = " << sumOfWeights()); MSG_INFO("Events passing electron veto1= " << nVeto0 << " (" << nVeto0/dPHO * 100. << "%)" ); MSG_INFO("Events passing electron veto2= " << nVeto1 << " (" << nVeto1/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet size veto = " << nVeto2 << " (" << nVeto2/dPHO * 100. << "%)" ); MSG_INFO("Events passing jet Et veto = " << nVeto3 << " (" << nVeto3/dPHO * 100. << "%)" ); MSG_INFO("Events passing xPom veto = " << nVeto4 << " (" << nVeto4/dPHO * 100. << "%)" ); } //@} private: /// @name Histograms //@{ Histo1DPtr _h_dsigma_all[6]; Histo1DPtr _h_xgamma; Histo1DPtr _h_dsigma_xgamma[2][4]; //@} int nVeto0, nVeto1, nVeto2, nVeto3, nVeto4; }; DECLARE_RIVET_PLUGIN(ZEUS_2008_I763404); } diff --git a/analyses/pluginHERA/ZEUS_2012_I1116258.cc b/analyses/pluginHERA/ZEUS_2012_I1116258.cc --- a/analyses/pluginHERA/ZEUS_2012_I1116258.cc +++ b/analyses/pluginHERA/ZEUS_2012_I1116258.cc @@ -1,154 +1,152 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/FastJets.hh" #include "fastjet/SISConePlugin.hh" namespace Rivet { /// @brief ZEUS inclusive jet photoproduction study used to measure alpha_s /// /// @author Jon Butterworth class ZEUS_2012_I1116258 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ZEUS_2012_I1116258); /// @name Analysis methods //@{ // Book projections and histograms void init() { // Projections // Jet schemes checked with oringal code, M.Wing, A.Geiser FinalState fs; double jet_radius = 1.0; declare(FastJets(fs, fastjet::JetAlgorithm::kt_algorithm, fastjet::RecombinationScheme::Et_scheme, jet_radius), "Jets"); declare(FastJets(fs, 
fastjet::JetAlgorithm::antikt_algorithm, fastjet::RecombinationScheme::Et_scheme, jet_radius), "Jets_akt"); // bit of messing about to use the correct recombnation scheme for SISCone. double overlap_threshold = 0.75; fastjet::SISConePlugin * plugin = new fastjet::SISConePlugin(jet_radius, overlap_threshold); plugin->set_use_jet_def_recombiner(true); JetDefinition siscone(plugin); siscone.set_recombination_scheme(fastjet::RecombinationScheme::Et_scheme); declare(FastJets(fs, siscone), "Jets_sis"); declare(DISKinematics(), "Kinematics"); // all eta - _h_etjet[0] = bookHisto1D(1, 1, 1); + book(_h_etjet[0], 1, 1, 1); // two ET cuts. - _h_etajet[0] = bookHisto1D(2, 1, 1); - _h_etajet[1] = bookHisto1D(3, 1, 1); + book(_h_etajet[0], 2, 1, 1); + book(_h_etajet[1], 3, 1, 1); // in eta regions - _h_etjet[1] = bookHisto1D(4, 1, 1); - _h_etjet[2] = bookHisto1D(5, 1, 1); - _h_etjet[3] = bookHisto1D(6, 1, 1); - _h_etjet[4] = bookHisto1D(7, 1, 1); - _h_etjet[5] = bookHisto1D(8, 1, 1); + book(_h_etjet[1], 4, 1, 1); + book(_h_etjet[2], 5, 1, 1); + book(_h_etjet[3], 6, 1, 1); + book(_h_etjet[4], 7, 1, 1); + book(_h_etjet[5], 8, 1, 1); // antiKT - _h_etjet[6] = bookHisto1D(9, 1, 1); - _h_etajet[2] = bookHisto1D(11, 1, 1); + book(_h_etjet[6], 9, 1, 1); + book(_h_etajet[2], 11, 1, 1); // SiSCone - _h_etjet[7] = bookHisto1D(10, 1, 1); - _h_etajet[3] = bookHisto1D(12, 1, 1); + book(_h_etjet[7], 10, 1, 1); + book(_h_etajet[3], 12, 1, 1); } // Do the analysis void analyze(const Event& event) { // Determine kinematics, including event orientation since ZEUS coord system is for +z = proton direction const DISKinematics& kin = apply(event, "Kinematics"); const int orientation = kin.orientation(); // Q2 and inelasticity cuts if (kin.Q2() > 1*GeV2) vetoEvent; if (!inRange(sqrt(kin.W2()), 142.0, 293.0)) vetoEvent; // Jet selection // @TODO check the recombination scheme const Jets jets = apply(event, "Jets") \ .jets(Cuts::Et > 17*GeV && Cuts::etaIn(-1*orientation, 2.5*orientation), cmpMomByEt); 
MSG_DEBUG("kT Jet multiplicity = " << jets.size()); const Jets jets_akt = apply(event, "Jets_akt") \ .jets(Cuts::Et > 17*GeV && Cuts::etaIn(-1*orientation, 2.5*orientation), cmpMomByEt); const Jets jets_sis = apply(event, "Jets_sis") \ .jets(Cuts::Et > 17*GeV && Cuts::etaIn(-1*orientation, 2.5*orientation), cmpMomByEt); // Fill histograms - const double weight = event.weight(); - for (const Jet& jet : jets ){ - _h_etjet[0]->fill(jet.pt(), weight); - _h_etajet[0]->fill(orientation*jet.eta(), weight); + _h_etjet[0]->fill(jet.pt()); + _h_etajet[0]->fill(orientation*jet.eta()); if (jet.pt()>21*GeV) { - _h_etajet[1]->fill(orientation*jet.eta(), weight); + _h_etajet[1]->fill(orientation*jet.eta()); } if (orientation*jet.eta() < 0) { - _h_etjet[1]->fill(jet.pt(), weight); + _h_etjet[1]->fill(jet.pt()); } else if (orientation*jet.eta() < 1) { - _h_etjet[2]->fill(jet.pt(), weight); + _h_etjet[2]->fill(jet.pt()); } else if (orientation*jet.eta() < 1.5) { - _h_etjet[3]->fill(jet.pt(), weight); + _h_etjet[3]->fill(jet.pt()); } else if (orientation*jet.eta() < 2) { - _h_etjet[4]->fill(jet.pt(), weight); + _h_etjet[4]->fill(jet.pt()); } else { - _h_etjet[5]->fill(jet.pt(), weight); + _h_etjet[5]->fill(jet.pt()); } } for (const Jet& jet : jets_akt ){ - _h_etjet[6]->fill(jet.pt(), weight); - _h_etajet[2]->fill(orientation*jet.eta(), weight); + _h_etjet[6]->fill(jet.pt()); + _h_etajet[2]->fill(orientation*jet.eta()); } for (const Jet& jet : jets_sis ){ - _h_etjet[7]->fill(jet.pt(), weight); - _h_etajet[3]->fill(orientation*jet.eta(), weight); + _h_etjet[7]->fill(jet.pt()); + _h_etajet[3]->fill(orientation*jet.eta()); } } // Finalize void finalize() { const double sf = crossSection()/picobarn/sumOfWeights(); for( int i = 0; i < 8; i++ ) { scale(_h_etjet[i], sf); } for( int i = 0; i < 4; i++ ) { scale(_h_etajet[i], sf); } } //@} private: /// @name Histograms //@{ Histo1DPtr _h_etjet[8], _h_etajet[4]; //@} }; DECLARE_RIVET_PLUGIN(ZEUS_2012_I1116258); } diff --git 
a/analyses/pluginLEP/ALEPH_1996_S3486095.cc b/analyses/pluginLEP/ALEPH_1996_S3486095.cc --- a/analyses/pluginLEP/ALEPH_1996_S3486095.cc +++ b/analyses/pluginLEP/ALEPH_1996_S3486095.cc @@ -1,497 +1,477 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief ALEPH QCD study with event shapes and identified particles /// @author Holger Schulz class ALEPH_1996_S3486095 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_1996_S3486095); /// @name Analysis methods //@{ void init() { // Set up projections declare(Beam(), "Beams"); const ChargedFinalState cfs; declare(cfs, "FS"); declare(UnstableParticles(), "UFS"); declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(cfs), "Sphericity"); declare(ParisiTensor(cfs), "Parisi"); const Thrust thrust(cfs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); // Book histograms book(_histSphericity ,1, 1, 1); book(_histAplanarity ,2, 1, 1); book(_hist1MinusT ,3, 1, 1); book(_histTMinor ,4, 1, 1); book(_histY3 ,5, 1, 1); book(_histHeavyJetMass ,6, 1, 1); book(_histCParam ,7, 1, 1); book(_histOblateness ,8, 1, 1); book(_histScaledMom ,9, 1, 1); book(_histRapidityT ,10, 1, 1); book(_histPtSIn ,11, 1, 1); book(_histPtSOut ,12, 1, 1); book(_histLogScaledMom ,17, 1, 1); book(_histChMult ,18, 1, 1); book(_histMeanChMult ,19, 1, 1); book(_histMeanChMultRapt05,20, 1, 1); book(_histMeanChMultRapt10,21, 1, 1); book(_histMeanChMultRapt15,22, 1, 1); book(_histMeanChMultRapt20,23, 1, 1); // Particle spectra book(_histMultiPiPlus ,25, 1, 1); book(_histMultiKPlus ,26, 
1, 1); book(_histMultiP ,27, 1, 1); book(_histMultiPhoton ,28, 1, 1); book(_histMultiPi0 ,29, 1, 1); book(_histMultiEta ,30, 1, 1); book(_histMultiEtaPrime ,31, 1, 1); book(_histMultiK0 ,32, 1, 1); book(_histMultiLambda0 ,33, 1, 1); book(_histMultiXiMinus ,34, 1, 1); book(_histMultiSigma1385Plus ,35, 1, 1); book(_histMultiXi1530_0 ,36, 1, 1); book(_histMultiRho ,37, 1, 1); book(_histMultiOmega782 ,38, 1, 1); book(_histMultiKStar892_0 ,39, 1, 1); book(_histMultiPhi ,40, 1, 1); book(_histMultiKStar892Plus ,43, 1, 1); // Mean multiplicities book(_histMeanMultiPi0 ,44, 1, 2); book(_histMeanMultiEta ,44, 1, 3); book(_histMeanMultiEtaPrime ,44, 1, 4); book(_histMeanMultiK0 ,44, 1, 5); book(_histMeanMultiRho ,44, 1, 6); book(_histMeanMultiOmega782 ,44, 1, 7); book(_histMeanMultiPhi ,44, 1, 8); book(_histMeanMultiKStar892Plus ,44, 1, 9); book(_histMeanMultiKStar892_0 ,44, 1, 10); book(_histMeanMultiLambda0 ,44, 1, 11); book(_histMeanMultiSigma0 ,44, 1, 12); book(_histMeanMultiXiMinus ,44, 1, 13); book(_histMeanMultiSigma1385Plus ,44, 1, 14); book(_histMeanMultiXi1530_0 ,44, 1, 15); book(_histMeanMultiOmegaOmegaBar ,44, 1, 16); - book(_weightedTotalPartNum, "weightedTotalPartNum"); - book(_weightedTotalNumPiPlus, "weightedTotalNumPiPlus"); - book(_weightedTotalNumKPlus, "weightedTotalNumKPlus"); - book(_weightedTotalNumP, "weightedTotalNumP"); - book(_weightedTotalNumPhoton, "weightedTotalNumPhoton"); - book(_weightedTotalNumPi0, "weightedTotalNumPi0"); - book(_weightedTotalNumEta, "weightedTotalNumEta"); - book(_weightedTotalNumEtaPrime, "weightedTotalNumEtaPrime"); - book(_weightedTotalNumK0, "weightedTotalNumK0"); - book(_weightedTotalNumLambda0, "weightedTotalNumLambda0"); - book(_weightedTotalNumXiMinus, "weightedTotalNumXiMinus"); - book(_weightedTotalNumSigma1385Plus, "weightedTotalNumSigma1385Plus"); - book(_weightedTotalNumXi1530_0, "weightedTotalNumXi1530_0"); - book(_weightedTotalNumRho, "weightedTotalNumRho"); - book(_weightedTotalNumOmega782, 
"weightedTotalNumOmega782"); - book(_weightedTotalNumKStar892_0, "weightedTotalNumKStar892_0"); - book(_weightedTotalNumPhi, "weightedTotalNumPhi"); - book(_weightedTotalNumKStar892Plus, "weightedTotalNumKStar892Plus"); - book(_numChParticles, "numChParticles"); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); _weightedTotalPartNum->fill(numParticles); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Thrusts MSG_DEBUG("Calculating thrust"); const Thrust& thrust = apply(e, "Thrust"); _hist1MinusT->fill(1 - thrust.thrust()); _histTMinor->fill(thrust.thrustMinor()); _histOblateness->fill(thrust.oblateness()); // Jets MSG_DEBUG("Calculating differential jet rate plots:"); const FastJets& durjet = apply(e, "DurhamJets"); if (durjet.clusterSeq()) { double y3 = durjet.clusterSeq()->exclusive_ymerge_max(2); if (y3>0.0) _histY3->fill(-1. * std::log(y3)); } // Sphericities MSG_DEBUG("Calculating sphericity"); const Sphericity& sphericity = apply(e, "Sphericity"); _histSphericity->fill(sphericity.sphericity()); _histAplanarity->fill(sphericity.aplanarity()); // C param MSG_DEBUG("Calculating Parisi params"); const ParisiTensor& parisi = apply(e, "Parisi"); _histCParam->fill(parisi.C()); // Hemispheres MSG_DEBUG("Calculating hemisphere variables"); const Hemispheres& hemi = apply(e, "Hemispheres"); _histHeavyJetMass->fill(hemi.scaledM2high()); // Iterate over all the charged final state particles. 
double Evis = 0.0; double rapt05 = 0.; double rapt10 = 0.; double rapt15 = 0.; double rapt20 = 0.; MSG_DEBUG("About to iterate over charged FS particles"); for (const Particle& p : fs.particles()) { // Get momentum and energy of each particle. const Vector3 mom3 = p.p3(); const double energy = p.E(); Evis += energy; - _numChParticles->fill(); // Scaled momenta. const double mom = mom3.mod(); const double scaledMom = mom/meanBeamMom; const double logInvScaledMom = -std::log(scaledMom); _histLogScaledMom->fill(logInvScaledMom); _histScaledMom->fill(scaledMom); // Get momenta components w.r.t. thrust and sphericity. const double momT = dot(thrust.thrustAxis(), mom3); const double pTinS = dot(mom3, sphericity.sphericityMajorAxis()); const double pToutS = dot(mom3, sphericity.sphericityMinorAxis()); _histPtSIn->fill(fabs(pTinS/GeV)); _histPtSOut->fill(fabs(pToutS/GeV)); // Calculate rapidities w.r.t. thrust. const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT)); _histRapidityT->fill(fabs(rapidityT)); if (std::fabs(rapidityT) <= 0.5) { rapt05 += 1.0; } if (std::fabs(rapidityT) <= 1.0) { rapt10 += 1.0; } if (std::fabs(rapidityT) <= 1.5) { rapt15 += 1.0; } if (std::fabs(rapidityT) <= 2.0) { rapt20 += 1.0; } } _histChMult->fill(numParticles); _histMeanChMultRapt05->fill(_histMeanChMultRapt05->bin(0).xMid(), rapt05); _histMeanChMultRapt10->fill(_histMeanChMultRapt10->bin(0).xMid(), rapt10); _histMeanChMultRapt15->fill(_histMeanChMultRapt15->bin(0).xMid(), rapt15); _histMeanChMultRapt20->fill(_histMeanChMultRapt20->bin(0).xMid(), rapt20); _histMeanChMult->fill(_histMeanChMult->bin(0).xMid(), numParticles); //// Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (Particles::const_iterator p = ufs.particles().begin(); p != ufs.particles().end(); ++p) { const Vector3 mom3 = p->momentum().p3(); int id = abs(p->pid()); const double mom = mom3.mod(); const double energy = p->momentum().E(); const double 
scaledMom = mom/meanBeamMom; const double scaledEnergy = energy/meanBeamMom; // meanBeamMom is approximately beam energy switch (id) { case 22: _histMultiPhoton->fill(-1.*std::log(scaledMom)); break; case -321: case 321: _histMultiKPlus->fill(scaledMom); break; case 211: case -211: _histMultiPiPlus->fill(scaledMom); break; case 2212: case -2212: _histMultiP->fill(scaledMom); break; case 111: _histMultiPi0->fill(scaledMom); _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid()); break; case 221: if (scaledMom >= 0.1) { _histMultiEta->fill(scaledEnergy); _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid()); } break; case 331: if (scaledMom >= 0.1) { _histMultiEtaPrime->fill(scaledEnergy); _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid()); } break; case 130: //klong case 310: //kshort _histMultiK0->fill(scaledMom); _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid()); break; case 113: _histMultiRho->fill(scaledMom); _histMeanMultiRho->fill(_histMeanMultiRho->bin(0).xMid()); break; case 223: _histMultiOmega782->fill(scaledMom); _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid()); break; case 333: _histMultiPhi->fill(scaledMom); _histMeanMultiPhi->fill(_histMeanMultiPhi->bin(0).xMid()); break; case 313: case -313: _histMultiKStar892_0->fill(scaledMom); _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid()); break; case 323: case -323: _histMultiKStar892Plus->fill(scaledEnergy); _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid()); break; case 3122: case -3122: _histMultiLambda0->fill(scaledMom); _histMeanMultiLambda0->fill(_histMeanMultiLambda0->bin(0).xMid()); break; case 3212: case -3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid()); break; case 3312: case -3312: _histMultiXiMinus->fill(scaledEnergy); _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid()); break; case 3114: case -3114: case 3224: case -3224: _histMultiSigma1385Plus->fill(scaledEnergy); 
_histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid()); break; case 3324: case -3324: _histMultiXi1530_0->fill(scaledEnergy); _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid()); break; case 3334: _histMeanMultiOmegaOmegaBar->fill(_histMeanMultiOmegaOmegaBar->bin(0).xMid()); break; } } } /// Finalize void finalize() { // Normalize inclusive single particle distributions to the average number // of charged particles per event. - const double avgNumParts = dbl(*_weightedTotalPartNum) / sumOfWeights(); + const double avgNumParts = _weightedTotalPartNum->sumW() / sumOfWeights(); normalize(_histPtSIn, avgNumParts); normalize(_histPtSOut, avgNumParts); normalize(_histRapidityT, avgNumParts); normalize(_histY3); normalize(_histLogScaledMom, avgNumParts); normalize(_histScaledMom, avgNumParts); // particle spectra scale(_histMultiPiPlus ,1./sumOfWeights()); scale(_histMultiKPlus ,1./sumOfWeights()); scale(_histMultiP ,1./sumOfWeights()); scale(_histMultiPhoton ,1./sumOfWeights()); scale(_histMultiPi0 ,1./sumOfWeights()); scale(_histMultiEta ,1./sumOfWeights()); scale(_histMultiEtaPrime ,1./sumOfWeights()); scale(_histMultiK0 ,1./sumOfWeights()); scale(_histMultiLambda0 ,1./sumOfWeights()); scale(_histMultiXiMinus ,1./sumOfWeights()); scale(_histMultiSigma1385Plus ,1./sumOfWeights()); scale(_histMultiXi1530_0 ,1./sumOfWeights()); scale(_histMultiRho ,1./sumOfWeights()); scale(_histMultiOmega782 ,1./sumOfWeights()); scale(_histMultiKStar892_0 ,1./sumOfWeights()); scale(_histMultiPhi ,1./sumOfWeights()); scale(_histMultiKStar892Plus ,1./sumOfWeights()); // event shape normalize(_hist1MinusT); normalize(_histTMinor); normalize(_histOblateness); normalize(_histSphericity); normalize(_histAplanarity); normalize(_histHeavyJetMass); normalize(_histCParam); // mean multiplicities scale(_histChMult , 2.0/sumOfWeights()); // taking into account the binwidth of 2 scale(_histMeanChMult , 1.0/sumOfWeights()); scale(_histMeanChMultRapt05 , 
1.0/sumOfWeights()); scale(_histMeanChMultRapt10 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt15 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt20 , 1.0/sumOfWeights()); scale(_histMeanMultiPi0 , 1.0/sumOfWeights()); scale(_histMeanMultiEta , 1.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights()); scale(_histMeanMultiK0 , 1.0/sumOfWeights()); scale(_histMeanMultiRho , 1.0/sumOfWeights()); scale(_histMeanMultiOmega782 , 1.0/sumOfWeights()); scale(_histMeanMultiPhi , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights()); scale(_histMeanMultiLambda0 , 1.0/sumOfWeights()); scale(_histMeanMultiSigma0 , 1.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus, 1.0/sumOfWeights()); scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmegaOmegaBar, 1.0/sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
CounterPtr _weightedTotalPartNum; /// @name Histograms //@{ Histo1DPtr _histSphericity; Histo1DPtr _histAplanarity; Histo1DPtr _hist1MinusT; Histo1DPtr _histTMinor; Histo1DPtr _histY3; Histo1DPtr _histHeavyJetMass; Histo1DPtr _histCParam; Histo1DPtr _histOblateness; Histo1DPtr _histScaledMom; Histo1DPtr _histRapidityT; Histo1DPtr _histPtSIn; Histo1DPtr _histPtSOut; Histo1DPtr _histJetRate2Durham; Histo1DPtr _histJetRate3Durham; Histo1DPtr _histJetRate4Durham; Histo1DPtr _histJetRate5Durham; Histo1DPtr _histLogScaledMom; Histo1DPtr _histChMult; Histo1DPtr _histMultiPiPlus; Histo1DPtr _histMultiKPlus; Histo1DPtr _histMultiP; Histo1DPtr _histMultiPhoton; Histo1DPtr _histMultiPi0; Histo1DPtr _histMultiEta; Histo1DPtr _histMultiEtaPrime; Histo1DPtr _histMultiK0; Histo1DPtr _histMultiLambda0; Histo1DPtr _histMultiXiMinus; Histo1DPtr _histMultiSigma1385Plus; Histo1DPtr _histMultiXi1530_0; Histo1DPtr _histMultiRho; Histo1DPtr _histMultiOmega782; Histo1DPtr _histMultiKStar892_0; Histo1DPtr _histMultiPhi; Histo1DPtr _histMultiKStar892Plus; // mean multiplicities Histo1DPtr _histMeanChMult; Histo1DPtr _histMeanChMultRapt05; Histo1DPtr _histMeanChMultRapt10; Histo1DPtr _histMeanChMultRapt15; Histo1DPtr _histMeanChMultRapt20; Histo1DPtr _histMeanMultiPi0; Histo1DPtr _histMeanMultiEta; Histo1DPtr _histMeanMultiEtaPrime; Histo1DPtr _histMeanMultiK0; Histo1DPtr _histMeanMultiRho; Histo1DPtr _histMeanMultiOmega782; Histo1DPtr _histMeanMultiPhi; Histo1DPtr _histMeanMultiKStar892Plus; Histo1DPtr _histMeanMultiKStar892_0; Histo1DPtr _histMeanMultiLambda0; Histo1DPtr _histMeanMultiSigma0; Histo1DPtr _histMeanMultiXiMinus; Histo1DPtr _histMeanMultiSigma1385Plus; Histo1DPtr _histMeanMultiXi1530_0; Histo1DPtr _histMeanMultiOmegaOmegaBar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_1996_S3486095); } diff --git a/analyses/pluginLEP/ALEPH_2014_I1267648.cc b/analyses/pluginLEP/ALEPH_2014_I1267648.cc --- a/analyses/pluginLEP/ALEPH_2014_I1267648.cc +++ 
b/analyses/pluginLEP/ALEPH_2014_I1267648.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class ALEPH_2014_I1267648 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_2014_I1267648); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); // Book histograms book(_h_pip0 , 1, 1, 1); book(_h_pi2p0, 2, 1, 1); book(_h_pi3p0, 3, 1, 1); book(_h_3pi , 4, 1, 1); book(_h_3pip0, 5, 1, 1); } void findDecayProducts(const Particle &mother, unsigned int &nstable, unsigned int &npip, unsigned int &npim, unsigned int &npi0, FourMomentum &ptot) { for (const Particle &p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if (id == PID::KPLUS || id == PID::KMINUS) { ++nstable; ptot += p.momentum(); } else if (id == PID::PIPLUS) { ++npip; ++nstable; ptot += p.momentum(); } else if (id == PID::PIMINUS) { ++npim; ++nstable; ptot += p.momentum(); } else if (id == PID::PI0) { ++nstable; ++npi0; ptot += p.momentum(); } else if (id == PID::PHOTON) continue; else if (!p.children().empty()) findDecayProducts(p, nstable, npip, npim, npi0, ptot); else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over taus for (const Particle& tau : apply(event, "UFS").particles(Cuts::abspid==PID::TAU)) { FourMomentum ptot; unsigned int nstable(0), npip(0), npim(0), npi0(0); findDecayProducts(tau,nstable,npip,npim,npi0,ptot); // tau -> pi pi0 nu_tau (both charges) if (npim==1 && npi0==1 && nstable==3) _h_pip0->fill(ptot.mass2()); // tau -> pi pi0 pi0 nu_tau (both charges) else if (npim==1 && npi0==2 && nstable==4) _h_pi2p0->fill(ptot.mass2()); // tau -> pi pi0 pi0 pi0 (3,1,1) else if (npim==1 && npi0==3 && nstable==5) _h_pi3p0->fill(ptot.mass2()); // tau -> 3 charged 
pions (4,1,1) else if (npim==2 && npip==1 && nstable==4) _h_3pi->fill(ptot.mass2()); // tau -> 3 charged pions + pi0 (5,1,1) else if (npim==2 && npip==1 && npi0==1 && nstable==5) _h_3pip0->fill(ptot.mass2()); } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_pip0); // normalize to unity normalize(_h_pi2p0); // normalize to unity normalize(_h_pi3p0); // normalize to unity normalize(_h_3pi); // normalize to unity normalize(_h_3pip0); // normalize to unity } //@} private: /// @name Histograms //@{ Histo1DPtr _h_pip0; Histo1DPtr _h_pi2p0; Histo1DPtr _h_pi3p0; Histo1DPtr _h_3pi; Histo1DPtr _h_3pip0; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_2014_I1267648); } diff --git a/analyses/pluginLEP/DELPHI_2000_I524694.cc b/analyses/pluginLEP/DELPHI_2000_I524694.cc --- a/analyses/pluginLEP/DELPHI_2000_I524694.cc +++ b/analyses/pluginLEP/DELPHI_2000_I524694.cc @@ -1,81 +1,80 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class DELPHI_2000_I524694 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_2000_I524694); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { declare(Beam(), "Beams"); declare(UnstableParticles(), "UFS"); - _histXpSigma = bookHisto1D( 1, 1, 1); - _histXpLambda = bookHisto1D( 3, 1, 1); + book(_histXpSigma, 1, 1, 1); + book(_histXpLambda, 3, 1, 1); } /// Perform the per-event analysis void analyze(const Event& event) { // Get event weight for histo filling - const double weight = event.weight(); // Get beams and average beam momentum const ParticlePair& beams = apply(event, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; const UnstableParticles& ufs = apply(event, "UFS"); - foreach (const Particle& p, ufs.particles()) { + for (const Particle& p : 
ufs.particles()) { const int id = p.abspid(); double xp = p.p3().mod()/meanBeamMom; switch (id) { case 3112: - _histXpSigma->fill(xp, weight); + _histXpSigma->fill(xp); break; case 3124: - _histXpLambda->fill(xp, weight); + _histXpLambda->fill(xp); break; } } } /// Normalise histograms etc., after the run void finalize() { double fact = 1./sumOfWeights(); scale(_histXpSigma , fact); scale(_histXpLambda, fact); } //@} /// @name Histograms //@{ Histo1DPtr _histXpSigma; Histo1DPtr _histXpLambda; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_2000_I524694); } diff --git a/analyses/pluginLEP/DELPHI_2003_I620250.cc b/analyses/pluginLEP/DELPHI_2003_I620250.cc --- a/analyses/pluginLEP/DELPHI_2003_I620250.cc +++ b/analyses/pluginLEP/DELPHI_2003_I620250.cc @@ -1,152 +1,151 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { /// @brief DELPHI event shapes below the Z pole class DELPHI_2003_I620250 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_2003_I620250); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections const FinalState fs; declare(fs, "FS"); const Thrust thrust(fs); declare(thrust, "Thrust"); declare(Sphericity(fs), "Sphericity"); declare(ParisiTensor(fs), "Parisi"); declare(Hemispheres(thrust), "Hemispheres"); // find the beam energy unsigned int offset = 0; if (fuzzyEquals(sqrtS()/GeV, 45, 1E-3)) offset = 1; else if (fuzzyEquals(sqrtS()/GeV, 66, 1E-3)) offset = 2; else if (fuzzyEquals(sqrtS()/GeV, 76, 1E-3)) offset = 3; else MSG_ERROR("Beam energy not supported!"); // Book the histograms - _h_thrust = bookHisto1D( 1, 1, offset); - _h_major = bookHisto1D( 2, 1, offset); - _h_minor = bookHisto1D( 3, 1, offset); - _h_sphericity = bookHisto1D( 4, 1, offset); - _h_planarity = bookHisto1D( 5, 1, offset); - _h_oblateness = 
bookHisto1D( 6, 1, offset); - _h_heavy_jet_mass = bookHisto1D( 7, 1, offset); - _h_light_jet_mass = bookHisto1D( 9, 1, offset); - _h_diff_jet_mass = bookHisto1D(10, 1, offset); - _h_total_jet_mass = bookHisto1D(11, 1, offset); - _h_heavy_jet_mass_E = bookHisto1D( 8, 1, offset); - _h_total_jet_mass_E = bookHisto1D(12, 1, offset); - _h_wide_broading = bookHisto1D(13, 1, offset); - _h_narrow_broading = bookHisto1D(14, 1, offset); - _h_total_broading = bookHisto1D(15, 1, offset); - _h_diff_broading = bookHisto1D(16, 1, offset); + book(_h_thrust, 1, 1, offset); + book(_h_major, 2, 1, offset); + book(_h_minor, 3, 1, offset); + book(_h_sphericity, 4, 1, offset); + book(_h_planarity, 5, 1, offset); + book(_h_oblateness, 6, 1, offset); + book(_h_heavy_jet_mass, 7, 1, offset); + book(_h_light_jet_mass, 9, 1, offset); + book(_h_diff_jet_mass, 10, 1, offset); + book(_h_total_jet_mass, 11, 1, offset); + book(_h_heavy_jet_mass_E, 8, 1, offset); + book(_h_total_jet_mass_E, 12, 1, offset); + book(_h_wide_broading, 13, 1, offset); + book(_h_narrow_broading, 14, 1, offset); + book(_h_total_broading, 15, 1, offset); + book(_h_diff_broading, 16, 1, offset); } /// Perform the per-event analysis void analyze(const Event& event) { - const double weight = event.weight(); const Thrust& thrust = apply(event, "Thrust"); // thrust related observables - _h_thrust ->fill(1.-thrust.thrust() ,weight); - _h_major ->fill(thrust.thrustMajor(),weight); - _h_minor ->fill(thrust.thrustMinor(),weight); - _h_oblateness->fill(thrust.oblateness() ,weight); + _h_thrust ->fill(1.-thrust.thrust() ); + _h_major ->fill(thrust.thrustMajor()); + _h_minor ->fill(thrust.thrustMinor()); + _h_oblateness->fill(thrust.oblateness() ); // sphericity related const Sphericity& sphericity = apply(event, "Sphericity"); - _h_sphericity->fill(sphericity.sphericity(),weight); - _h_planarity ->fill(sphericity.planarity() ,weight); + _h_sphericity->fill(sphericity.sphericity()); + _h_planarity ->fill(sphericity.planarity() ); // 
hemisphere related const Hemispheres& hemi = apply(event, "Hemispheres"); // standard jet masses - _h_heavy_jet_mass->fill(hemi.scaledM2high(),weight); - _h_light_jet_mass->fill(hemi.scaledM2low() ,weight); - _h_diff_jet_mass ->fill(hemi.scaledM2diff(),weight); - _h_total_jet_mass->fill(hemi.scaledM2low()+hemi.scaledM2high(),weight); + _h_heavy_jet_mass->fill(hemi.scaledM2high()); + _h_light_jet_mass->fill(hemi.scaledM2low() ); + _h_diff_jet_mass ->fill(hemi.scaledM2diff()); + _h_total_jet_mass->fill(hemi.scaledM2low()+hemi.scaledM2high()); // jet broadening - _h_wide_broading ->fill(hemi.Bmax() ,weight); - _h_narrow_broading->fill(hemi.Bmin() ,weight); - _h_total_broading ->fill(hemi.Bsum() ,weight); - _h_diff_broading ->fill(hemi.Bdiff(),weight); + _h_wide_broading ->fill(hemi.Bmax() ); + _h_narrow_broading->fill(hemi.Bmin() ); + _h_total_broading ->fill(hemi.Bsum() ); + _h_diff_broading ->fill(hemi.Bdiff()); // E scheme jet masses Vector3 axis = thrust.thrustAxis(); FourMomentum p4With, p4Against; double Evis(0); - foreach(const Particle& p, apply(event, "FS").particles()) { + for (const Particle& p : apply(event, "FS").particles()) { Vector3 p3 = p.momentum().vector3().unitVec(); const double E = p.momentum().E(); Evis += E; p3 = E*p3; const double p3Para = dot(p3, axis); FourMomentum p4(E,p3.x(),p3.y(),p3.z()); if (p3Para > 0) p4With += p4; else if (p3Para < 0) p4Against += p4; else { MSG_WARNING("Particle split between hemispheres"); p4With += 0.5 * p4; p4Against += 0.5 * p4; } } const double mass2With = p4With.mass2()/sqr(Evis); const double mass2Against = p4Against.mass2()/sqr(Evis); // fill the histograms - _h_heavy_jet_mass_E->fill(max(mass2With,mass2Against),weight); - _h_total_jet_mass_E->fill(mass2With+mass2Against,weight); + _h_heavy_jet_mass_E->fill(max(mass2With,mass2Against)); + _h_total_jet_mass_E->fill(mass2With+mass2Against); } /// Normalise histograms etc., after the run void finalize() { normalize(_h_thrust ); normalize(_h_major ); 
normalize(_h_minor ); normalize(_h_sphericity ); normalize(_h_planarity ); normalize(_h_oblateness ); normalize(_h_heavy_jet_mass ); normalize(_h_light_jet_mass ); normalize(_h_diff_jet_mass ); normalize(_h_total_jet_mass ); normalize(_h_heavy_jet_mass_E); normalize(_h_total_jet_mass_E); normalize(_h_wide_broading ); normalize(_h_narrow_broading ); normalize(_h_total_broading ); normalize(_h_diff_broading ); } //@} /// @name Histograms //@{ Histo1DPtr _h_thrust,_h_major,_h_minor; Histo1DPtr _h_sphericity,_h_planarity,_h_oblateness; Histo1DPtr _h_heavy_jet_mass,_h_light_jet_mass,_h_diff_jet_mass,_h_total_jet_mass; Histo1DPtr _h_heavy_jet_mass_E,_h_total_jet_mass_E; Histo1DPtr _h_wide_broading,_h_narrow_broading,_h_total_broading,_h_diff_broading; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_2003_I620250); } diff --git a/analyses/pluginLEP/OPAL_1997_S3396100.cc b/analyses/pluginLEP/OPAL_1997_S3396100.cc --- a/analyses/pluginLEP/OPAL_1997_S3396100.cc +++ b/analyses/pluginLEP/OPAL_1997_S3396100.cc @@ -1,143 +1,143 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL strange baryon paper /// @author Peter Richardson class OPAL_1997_S3396100 : public Analysis { public: /// Constructor OPAL_1997_S3396100() : Analysis("OPAL_1997_S3396100") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); declare(UnstableParticles(), "UFS"); book(_histXpLambda , 1, 1, 1); book(_histXiLambda , 2, 1, 1); book(_histXpXiMinus , 3, 1, 1); book(_histXiXiMinus , 4, 1, 1); book(_histXpSigma1385Plus , 5, 1, 1); book(_histXiSigma1385Plus , 6, 1, 1); book(_histXpSigma1385Minus , 7, 1, 1); book(_histXiSigma1385Minus , 8, 1, 1); book(_histXpXi1530 , 9, 1, 1); book(_histXiXi1530 ,10, 1, 1); book(_histXpLambda1520 
,11, 1, 1); book(_histXiLambda1520 ,12, 1, 1); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (const Particle& p : ufs.particles()) { const int id = p.abspid(); double xp = p.p3().mod()/meanBeamMom; double xi = -log(xp); switch (id) { case 3312: - _histXpXiMinus->fill(xp, weight); - _histXiXiMinus->fill(xi, weight); + _histXpXiMinus->fill(xp); + _histXiXiMinus->fill(xi); break; case 3224: - _histXpSigma1385Plus->fill(xp, weight); - _histXiSigma1385Plus->fill(xi, weight); + _histXpSigma1385Plus->fill(xp); + _histXiSigma1385Plus->fill(xi); break; case 3114: - _histXpSigma1385Minus->fill(xp, weight); - _histXiSigma1385Minus->fill(xi, weight); + _histXpSigma1385Minus->fill(xp); + _histXiSigma1385Minus->fill(xi); break; case 3122: - _histXpLambda->fill(xp, weight); - _histXiLambda->fill(xi, weight); + _histXpLambda->fill(xp); + _histXiLambda->fill(xi); break; case 3324: - _histXpXi1530->fill(xp, weight); - _histXiXi1530->fill(xi, weight); + _histXpXi1530->fill(xp); + _histXiXi1530->fill(xi); break; case 3124: - _histXpLambda1520->fill(xp, weight); - _histXiLambda1520->fill(xi, weight); + _histXpLambda1520->fill(xp); + _histXiLambda1520->fill(xi); break; } } } /// Finalize void finalize() { double fact=1./sumOfWeights(); scale(_histXpLambda , fact); scale(_histXiLambda , fact); 
scale(_histXpXiMinus , fact); scale(_histXiXiMinus , fact); scale(_histXpSigma1385Plus , fact); scale(_histXiSigma1385Plus , fact); scale(_histXpSigma1385Minus, fact); scale(_histXiSigma1385Minus, fact); scale(_histXpXi1530 , fact); scale(_histXiXi1530 , fact); scale(_histXpLambda1520 , fact); scale(_histXiLambda1520 , fact); } //@} private: Histo1DPtr _histXpLambda ; Histo1DPtr _histXiLambda ; Histo1DPtr _histXpXiMinus ; Histo1DPtr _histXiXiMinus ; Histo1DPtr _histXpSigma1385Plus ; Histo1DPtr _histXiSigma1385Plus ; Histo1DPtr _histXpSigma1385Minus; Histo1DPtr _histXiSigma1385Minus; Histo1DPtr _histXpXi1530 ; Histo1DPtr _histXiXi1530 ; Histo1DPtr _histXpLambda1520 ; Histo1DPtr _histXiLambda1520 ; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1997_S3396100); } diff --git a/analyses/pluginLEP/OPAL_2002_S5361494.cc b/analyses/pluginLEP/OPAL_2002_S5361494.cc --- a/analyses/pluginLEP/OPAL_2002_S5361494.cc +++ b/analyses/pluginLEP/OPAL_2002_S5361494.cc @@ -1,166 +1,161 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// @brief OPAL multiplicities at various energies /// @author Peter Richardson class OPAL_2002_S5361494 : public Analysis { public: /// Constructor OPAL_2002_S5361494() : Analysis("OPAL_2002_S5361494") {} /// @name Analysis methods //@{ void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "CFS"); declare(InitialQuarks(), "IQF"); - _cLight = bookCounter("TMP/CLIGHT" ); - _wLight = bookCounter("TMP/WLIGHT" ); - _cCharm = 
bookCounter("TMP/CCHARM" ); - _wCharm = bookCounter("TMP/WCHARM" ); - _cBottom = bookCounter("TMP/CBOTTOM"); - _wBottom = bookCounter("TMP/WBOTTOM"); + book(_cLight, "TMP/CLIGHT" ); + book(_wLight, "TMP/WLIGHT" ); + book(_cCharm, "TMP/CCHARM" ); + book(_wCharm, "TMP/WCHARM" ); + book(_cBottom, "TMP/CBOTTOM"); + book(_wBottom, "TMP/WBOTTOM"); - book(_s_hists[0], 1, 1, 1); - book(_s_hists[1], 1, 1, 2); - book(_s_hists[2], 1, 1, 3); - book(_s_hists[3], 1, 1, 4); //< bottom minus light } void analyze(const Event& event) { // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. const FinalState& cfs = apply(event, "CFS"); if (cfs.size() < 2) vetoEvent; int flavour = 0; const InitialQuarks& iqf = apply(event, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap[p.pid()] < p.E()) { quarkmap[p.pid()] = p.E(); } } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { if (quarkmap[i]+quarkmap[-i] > maxenergy) { flavour = i; } } } const size_t numParticles = cfs.particles().size(); switch (flavour) { case 1: case 2: case 3: _wLight ->fill(); _cLight ->fill(numParticles); break; case 4: _wCharm ->fill(); _cCharm ->fill(numParticles); break; case 5: _wBottom->fill(); _cBottom->fill(numParticles); break; } } void finalize() { // calculate the averages and diffs if(_wLight ->numEntries()) scale( _cLight, 1./_wLight->val()); if(_wCharm ->numEntries()) scale( _cCharm, 1./_wCharm->val()); if(_wBottom->numEntries()) scale(_cBottom,1./_wBottom->val()); Counter _cDiff = *_cBottom - *_cLight; // fill the histograms for (unsigned int ix=1;ix<5;++ix) { double val(0.), err(0.0); if(ix==1) { val = _cBottom->val(); err = _cBottom->err(); } else if(ix==2) { val = _cCharm->val(); err = _cCharm->err(); } else 
if(ix==3) { val = _cLight->val(); err = _cLight->err(); } else if(ix==4) { val = _cDiff.val(); err = _cDiff.err(); } /// @todo TIDY! Scatter2D temphisto(refData(1, 1, ix)); + Scatter2DPtr mult; + book(mult, 1, 1, ix); for (size_t b = 0; b < temphisto.numPoints(); b++) { const double x = temphisto.point(b).x(); pair ex = temphisto.point(b).xErrs(); pair ex2 = ex; if(ex2.first ==0.) ex2. first=0.0001; if(ex2.second==0.) ex2.second=0.0001; if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) { - s_hists[ix]->addPoint(x, val, ex, make_pair(err,err)); + mult->addPoint(x, val, ex, make_pair(err,err)); } else { - s_hists[ix]->addPoint(x, 0., ex, make_pair(0.,.0)); + mult->addPoint(x, 0., ex, make_pair(0.,.0)); } } } } //@} private: - /// Final plots - Scatter2DPtr s_hists; - /// @name Multiplicities /// @todo Don't we have a Dbn1D-like type that can do both at once? //@{ CounterPtr _cLight, _wLight; CounterPtr _cCharm, _wCharm; CounterPtr _cBottom, _wBottom; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2002_S5361494); } diff --git a/analyses/pluginLEP/OPAL_2004_I631361.cc b/analyses/pluginLEP/OPAL_2004_I631361.cc --- a/analyses/pluginLEP/OPAL_2004_I631361.cc +++ b/analyses/pluginLEP/OPAL_2004_I631361.cc @@ -1,333 +1,362 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HadronicFinalState.hh" +#include "Rivet/Tools/BinnedHistogram.hh" #include "fastjet/JetDefinition.hh" +namespace fastjet { + +class P_scheme : public JetDefinition::Recombiner { + public: + std::string description() const {return "";} + void recombine(const PseudoJet & pa, const PseudoJet & pb, + PseudoJet & pab) const { + PseudoJet tmp = pa + pb; + double E = sqrt(tmp.px()*tmp.px() + tmp.py()*tmp.py() + tmp.pz()*tmp.pz()); + pab.reset_momentum(tmp.px(), tmp.py(), tmp.pz(), E); + } + void preprocess(PseudoJet & p) const { + 
double E = sqrt(p.px()*p.px() + p.py()*p.py() + p.pz()*p.pz()); + p.reset_momentum(p.px(), p.py(), p.pz(), E); + } + ~P_scheme() { } +}; + +} namespace Rivet { class OPAL_2004_I631361 : public Analysis { public: /// Constructor OPAL_2004_I631361() : Analysis("OPAL_2004_I631361"), _sumW(0.0) { } /// @name Analysis methods //@{ void init() { // Get options from the new option system _mode = 0; if ( getOption("PROCESS") == "GG" ) _mode = 0; if ( getOption("PROCESS") == "QQ" ) _mode = 1; // projections we need for both cases const FinalState fs; declare(fs, "FS"); const ChargedFinalState cfs; declare(cfs, "CFS"); // additional projections for q qbar if(_mode==1) { declare(HadronicFinalState(fs), "HFS"); declare(HadronicFinalState(cfs), "HCFS"); } // book the histograms if(_mode==0) { int ih(0), iy(0); if (inRange(0.5*sqrtS()/GeV, 5.0, 5.5)) { ih = 1; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 5.5, 6.5)) { ih = 1; iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 6.5, 7.5)) { ih = 1; iy = 3; } else if (inRange(0.5*sqrtS()/GeV, 7.5, 9.5)) { ih = 2; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.5, 13.0)) { ih = 2; iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 13.0, 16.0)) { ih = 3; iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 16.0, 20.0)) { ih = 3; iy = 2; } assert(ih>0); - _h_chMult = bookHisto1D(ih,1,iy); + book(_h_chMult_gg, ih,1,iy); if(ih==3) - _h_chFragFunc = bookHisto1D(5,1,iy); + book(_h_chFragFunc_gg, 5,1,iy); else - _h_chFragFunc = nullptr; - } + _h_chFragFunc_gg = nullptr; } else { - _h_chMult_qq.addHistogram(5.0, 5.5, bookHisto1D(1,1,1)); - _h_chMult_qq.addHistogram(5.5, 6.5, bookHisto1D(1,1,2)); - _h_chMult_qq.addHistogram(6.5, 7.5, bookHisto1D(1,1,3)); - _h_chMult_qq.addHistogram(7.5, 9.5, bookHisto1D(2,1,1)); - _h_chMult_qq.addHistogram(9.5, 13.0, bookHisto1D(2,1,2)); - _h_chMult_qq.addHistogram(13.0, 16.0, bookHisto1D(3,1,1)); - _h_chMult_qq.addHistogram(16.0, 20.0, bookHisto1D(3,1,2)); + Histo1DPtr dummy; + book(dummy, 1,1,1); + _h_chMult_qq.add(5.0, 5.5, dummy); 
+ book(dummy, 1,1,2); + _h_chMult_qq.add(5.5, 6.5, dummy); + book(dummy, 1,1,3); + _h_chMult_qq.add(6.5, 7.5, dummy); + book(dummy, 2,1,1); + _h_chMult_qq.add(7.5, 9.5, dummy); + book(dummy, 2,1,2); + _h_chMult_qq.add(9.5, 13.0, dummy); + book(dummy, 3,1,1); + _h_chMult_qq.add(13.0, 16.0, dummy); + book(dummy, 3,1,2); + _h_chMult_qq.add(16.0, 20.0, dummy); - _h_chFragFunc_qq.addHistogram(13.0, 16.0, bookHisto1D(5,1,1)); - _h_chFragFunc_qq.addHistogram(16.0, 20.0, bookHisto1D(5,1,2)); + book(dummy, 5,1,1); + _h_chFragFunc_qq.add(13.0, 16.0, dummy); + book(dummy, 5,1,2); + _h_chFragFunc_qq.add(16.0, 20.0, dummy); } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // gg mode if(_mode==0) { // find the initial gluons Particles initial; for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { ConstGenVertexPtr pv = p->production_vertex(); const PdgId pid = p->pdg_id(); if(pid!=21) continue; bool passed = false; for (ConstGenParticlePtr pp : HepMCUtils::particles(pv, Relatives::PARENTS)) { const PdgId ppid = abs(pp->pdg_id()); passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || ppid == PID::ZBOSON || ppid == PID::GAMMA); if(passed) break; } if(passed) initial.push_back(Particle(*p)); } if(initial.size()!=2) vetoEvent; // use the direction for the event axis Vector3 axis = initial[0].momentum().p3().unit(); // fill histograms const Particles& chps = applyProjection(event, "CFS").particles(); unsigned int nMult[2] = {0,0}; _sumW += 2.*weight; // distribution for (const Particle& p : chps) { double xE = 2.*p.E()/sqrtS(); if(_h_chFragFunc_gg) _h_chFragFunc_gg->fill(xE, weight); if(p.momentum().p3().dot(axis)>0.) 
++nMult[0]; else ++nMult[1]; } // multiplicities in jet _h_chMult_gg->fill(nMult[0],weight); _h_chMult_gg->fill(nMult[1],weight); } // qqbar mode else { // cut on the number of charged particles const Particles& chParticles = applyProjection(event, "CFS").particles(); if(chParticles.size() < 5) vetoEvent; // cluster the jets const Particles& particles = applyProjection(event, "FS").particles(); fastjet::JetDefinition ee_kt_def(fastjet::ee_kt_algorithm, &p_scheme); PseudoJets pParticles; - foreach(Particle p, particles) { + for (Particle p : particles) { PseudoJet temp = p.pseudojet(); if(p.fromBottom()) { temp.set_user_index(5); } pParticles.push_back(temp); } fastjet::ClusterSequence cluster(pParticles, ee_kt_def); // rescale energys to just keep the directions of the jets // and keep track of b tags PseudoJets pJets = sorted_by_E(cluster.exclusive_jets_up_to(3)); if(pJets.size() < 3) vetoEvent; array dirs; for(int i=0; i<3; i++) { dirs[i] = Vector3(pJets[i].px(),pJets[i].py(),pJets[i].pz()).unit(); } array bTagged; Jets jets; for(int i=0; i<3; i++) { double Ejet = sqrtS()*sin(angle(dirs[(i+1)%3],dirs[(i+2)%3])) / (sin(angle(dirs[i],dirs[(i+1)%3])) + sin(angle(dirs[i],dirs[(i+2)%3])) + sin(angle(dirs[(i+1)%3],dirs[(i+2)%3]))); jets.push_back(FourMomentum(Ejet,Ejet*dirs[i].x(),Ejet*dirs[i].y(),Ejet*dirs[i].z())); bTagged[i] = false; - foreach(PseudoJet particle, pJets[i].constituents()) { + for (PseudoJet particle : pJets[i].constituents()) { if(particle.user_index() > 1 and !bTagged[i]) { bTagged[i] = true; } } } int QUARK1 = 0, QUARK2 = 1, GLUON = 2; if(jets[QUARK2].E() > jets[QUARK1].E()) swap(QUARK1, QUARK2); if(jets[GLUON].E() > jets[QUARK1].E()) swap(QUARK1, GLUON); if(!bTagged[QUARK2]) { if(!bTagged[GLUON]) vetoEvent; else swap(QUARK2, GLUON); } if(bTagged[GLUON]) vetoEvent; // exclude collinear or soft jets double k1 = jets[QUARK1].E()*min(angle(jets[QUARK1].momentum(),jets[QUARK2].momentum()), angle(jets[QUARK1].momentum(),jets[GLUON].momentum())); double 
k2 = jets[QUARK2].E()*min(angle(jets[QUARK2].momentum(),jets[QUARK1].momentum()), angle(jets[QUARK2].momentum(),jets[GLUON].momentum())); if(k1<8.0*GeV || k2<8.0*GeV) vetoEvent; double sqg = (jets[QUARK1].momentum()+jets[GLUON].momentum()).mass2(); double sgq = (jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); double s = (jets[QUARK1].momentum()+jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); double Eg = 0.5*sqrt(sqg*sgq/s); if(Eg < 5.0 || Eg > 20.0) { vetoEvent; } else if(Eg > 9.5) { //requirements for experimental reconstructability raise as energy raises if(!bTagged[QUARK1]) { vetoEvent; } } // all cuts applied, increment sum of weights _sumWEbin[getEbin(Eg)] += weight; // transform to frame with event in y-z and glue jet in z direction Matrix3 glueTOz(jets[GLUON].momentum().vector3(), Vector3(0,0,1)); Vector3 transQuark = glueTOz*jets[QUARK2].momentum().vector3(); Matrix3 quarksTOyz(Vector3(transQuark.x(), transQuark.y(), 0), Vector3(0,1,0)); // work out transformation to symmetric frame array x_cm; array x_cm_y; array x_cm_z; array x_pr; for(int i=0; i<3; i++) { x_cm[i] = 2*jets[i].E()/sqrt(s); Vector3 p_transf = quarksTOyz*glueTOz*jets[i].p3(); x_cm_y[i] = 2*p_transf.y()/sqrt(s); x_cm_z[i] = 2*p_transf.z()/sqrt(s); } x_pr[GLUON] = sqrt(4*(1-x_cm[QUARK1])*(1-x_cm[QUARK2])/(3+x_cm[GLUON])); x_pr[QUARK1] = x_pr[GLUON]/(1-x_cm[QUARK1]); x_pr[QUARK2] = x_pr[GLUON]/(1-x_cm[QUARK2]); double gamma = (x_pr[QUARK1] + x_pr[GLUON] + x_pr[QUARK2])/2; double beta_z = x_pr[GLUON]/(gamma*x_cm[GLUON]) - 1; double beta_y = (x_pr[QUARK2]/gamma - x_cm[QUARK2] - beta_z*x_cm_z[QUARK2])/x_cm_y[QUARK2]; LorentzTransform toSymmetric = LorentzTransform::mkObjTransformFromBeta(Vector3(0.,beta_y,beta_z)). 
postMult(quarksTOyz*glueTOz); FourMomentum transGlue = toSymmetric.transform(jets[GLUON].momentum()); double cutAngle = angle(toSymmetric.transform(jets[QUARK2].momentum()), transGlue)/2; int nCh = 0; - foreach(const Particle& chP, chParticles ) { + for (const Particle& chP : chParticles ) { FourMomentum pSymmFrame = toSymmetric.transform(FourMomentum(chP.p3().mod(), chP.px(), chP.py(), chP.pz())); if(angle(pSymmFrame, transGlue) < cutAngle) { _h_chFragFunc_qq.fill(Eg, pSymmFrame.E()*sin(cutAngle)/Eg, weight); nCh++; } } _h_chMult_qq.fill(Eg, nCh, weight); } } /// Normalise histograms etc., after the run void finalize() { if(_mode==0) { normalize(_h_chMult_gg); if(_h_chFragFunc_gg) scale(_h_chFragFunc_gg, 1./_sumW); } else { - for (Histo1DPtr hist : _h_chMult_qq.getHistograms()) { + for (Histo1DPtr hist : _h_chMult_qq.histos()) { normalize(hist); } for(int i=0; i<2; i++) { if(!isZero(_sumWEbin[i+5])) { - scale(_h_chFragFunc_qq.getHistograms()[i], 1./_sumWEbin[i+5]); + scale(_h_chFragFunc_qq.histos()[i], 1./_sumWEbin[i+5]); } } } } //@} private: int getEbin(double E_glue) { int ih = -1; if (inRange(E_glue/GeV, 5.0, 5.5)) { ih = 0; } else if (inRange(E_glue/GeV, 5.5, 6.5)) { ih = 1; } else if (inRange(E_glue/GeV, 6.5, 7.5)) { ih = 2; } else if (inRange(E_glue/GeV, 7.5, 9.5)) { ih = 3; } else if (inRange(E_glue/GeV, 9.5, 13.0)) { ih = 4; } else if (inRange(E_glue/GeV, 13.0, 16.0)) { ih = 5; } else if (inRange(E_glue/GeV, 16.0, 20.0)) { ih = 6; } assert(ih >= 0); return ih; } class PScheme : public JetDefinition::Recombiner { public: std::string description() const {return "";} void recombine(const PseudoJet & pa, const PseudoJet & pb, PseudoJet & pab) const { PseudoJet tmp = pa + pb; double E = sqrt(tmp.px()*tmp.px() + tmp.py()*tmp.py() + tmp.pz()*tmp.pz()); pab.reset_momentum(tmp.px(), tmp.py(), tmp.pz(), E); } void preprocess(PseudoJet & p) const { double E = sqrt(p.px()*p.px() + p.py()*p.py() + p.pz()*p.pz()); p.reset_momentum(p.px(), p.py(), p.pz(), E); } 
~PScheme() { } }; private: // The mode unsigned int _mode; /// @todo IMPROVEMENT NEEDED? double _sumW; vector _sumWEbin; // p scheme jet definition fastjet::P_scheme p_scheme; /// @name Histograms //@{ Histo1DPtr _h_chMult_gg; Histo1DPtr _h_chFragFunc_gg; BinnedHistogram _h_chMult_qq; BinnedHistogram _h_chFragFunc_qq; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I631361); } diff --git a/analyses/pluginLEP/OPAL_2004_I648738.cc b/analyses/pluginLEP/OPAL_2004_I648738.cc --- a/analyses/pluginLEP/OPAL_2004_I648738.cc +++ b/analyses/pluginLEP/OPAL_2004_I648738.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class OPAL_2004_I648738 : public Analysis { public: /// Constructor OPAL_2004_I648738() : Analysis("OPAL_2004_I648738"), _sumW(3), _histo_xE(3) { } /// @name Analysis methods //@{ void init() { declare(FinalState(), "FS"); declare(ChargedFinalState(), "CFS"); unsigned int ih=0; if (inRange(0.5*sqrtS()/GeV, 4.0, 9.0)) { ih = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.0, 19.0)) { ih = 2; } else if (inRange(0.5*sqrtS()/GeV, 19.0, 30.0)) { ih = 3; } else if (inRange(0.5*sqrtS()/GeV, 45.5, 45.7)) { ih = 5; } else if (inRange(0.5*sqrtS()/GeV, 30.0, 70.0)) { ih = 4; } else if (inRange(0.5*sqrtS()/GeV, 91.5, 104.5)) { ih = 6; } assert(ih>0); // book the histograms book(_histo_xE[0], ih+5,1,1); book(_histo_xE[1], ih+5,1,2); if(ih<5) book(_histo_xE[2] ,ih+5,1,3); book(_sumW[0], "sumW_0"); book(_sumW[1], "sumW_1"); book(_sumW[2], "sumW_2"); } /// Perform the per-event analysis void analyze(const Event& event) { // find the initial quarks/gluons Particles initial; for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { ConstGenVertexPtr pv = p->production_vertex(); const PdgId pid = abs(p->pdg_id()); if(!( (pid>=1&&pid<=5) || pid ==21) ) continue; bool passed = false; for (ConstGenParticlePtr pp : 
HepMCUtils::particles(pv, Relatives::PARENTS)) { const PdgId ppid = abs(pp->pdg_id()); passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || ppid == PID::ZBOSON || ppid == PID::GAMMA); if(passed) break; } if(passed) initial.push_back(Particle(*p)); } if(initial.size()!=2) { vetoEvent; } // type of event unsigned int itype=2; - if(initial[0].pdgId()==-initial[1].pdgId()) { - PdgId pid = abs(initial[0].pdgId()); + if(initial[0].pid()==-initial[1].pid()) { + PdgId pid = abs(initial[0].pid()); if(pid>=1&&pid<=4) itype=0; else itype=1; } assert(itype<_histo_xE.size()); // fill histograms _sumW[itype]->fill(2.); const Particles& chps = applyProjection(event, "CFS").particles(); for(const Particle& p : chps) { double xE = 2.*p.E()/sqrtS(); _histo_xE[itype]->fill(xE); } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=0;ix<_histo_xE.size();++ix) { if(_sumW[ix]->val()>0.) scale(_histo_xE[ix],1./ *_sumW[ix]); } } //@} private: vector _sumW; /// @name Histograms //@{ vector _histo_xE; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I648738); } diff --git a/analyses/pluginLHCb/LHCB_2010_S8758301.cc b/analyses/pluginLHCb/LHCB_2010_S8758301.cc --- a/analyses/pluginLHCb/LHCB_2010_S8758301.cc +++ b/analyses/pluginLHCb/LHCB_2010_S8758301.cc @@ -1,553 +1,553 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Math/Constants.hh" #include "Rivet/Math/Units.hh" namespace Rivet { using namespace std; class LHCB_2010_S8758301 : public Analysis { public: // Lifetime cut: longest living ancestor ctau < 10^-11 [m] const double MAX_CTAU = 1.0E-11; // [m] const double MIN_PT = 0.0001; // [GeV/c] /// @name Constructors etc. 
//@{ /// Constructor LHCB_2010_S8758301() : Analysis("LHCB_2010_S8758301"), sumKs0_badnull(0), sumKs0_badlft(0), sumKs0_all(0), sumKs0_outup(0), sumKs0_outdwn(0), sum_low_pt_loss(0), sum_high_pt_loss(0) { } //@} /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { MSG_DEBUG("Initializing analysis!"); fillMap(partLftMap); declare(UnstableParticles(), "UFS"); book(_h_K0s_pt_30 ,1,1,1); book(_h_K0s_pt_35 ,1,1,2); book(_h_K0s_pt_40 ,1,1,3); book(_h_K0s_pt_y_30 ,2,1,1); book(_h_K0s_pt_y_35 ,2,1,2); book(_h_K0s_pt_y_40 ,2,1,3); book(_h_K0s_pt_y_all ,3,1,1); book(sumKs0_30, "TMP/sumKs0_30"); book(sumKs0_35, "TMP/sumKs0_35"); book(sumKs0_40, "TMP/sumKs0_40"); } /// Perform the per-event analysis void analyze(const Event& event) { int id; double y, pT; const UnstableParticles& ufs = apply(event, "UFS"); double ancestor_lftime; for (const Particle& p : ufs.particles()) { id = p.pid(); if ((id != 310) && (id != -310)) continue; sumKs0_all ++; ancestor_lftime = 0.; ConstGenParticlePtr long_ancestor = getLongestLivedAncestor(p, ancestor_lftime); if ( !(long_ancestor) ) { sumKs0_badnull ++; continue; } if ( ancestor_lftime > MAX_CTAU ) { sumKs0_badlft ++; MSG_DEBUG("Ancestor " << long_ancestor->pdg_id() << ", ctau: " << ancestor_lftime << " [m]"); continue; } const FourMomentum& qmom = p.momentum(); y = 0.5 * log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz())); pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py())); if (pT < MIN_PT) { sum_low_pt_loss ++; MSG_DEBUG("Small pT K^0_S: " << pT << " GeV/c."); } if (pT > 1.6) { sum_high_pt_loss ++; } if (y > 2.5 && y < 4.0) { _h_K0s_pt_y_all->fill(pT); if (y > 2.5 && y < 3.0) { _h_K0s_pt_y_30->fill(pT); _h_K0s_pt_30->fill(pT); sumKs0_30->fill(); } else if (y > 3.0 && y < 3.5) { _h_K0s_pt_y_35->fill(pT); _h_K0s_pt_35->fill(pT); sumKs0_35->fill(); } else if (y > 3.5 && y < 4.0) { _h_K0s_pt_y_40->fill(pT); _h_K0s_pt_40->fill(pT); sumKs0_40->fill(); } } else if (y < 2.5) { 
sumKs0_outdwn ++; } else if (y > 4.0) { sumKs0_outup ++; } } } /// Normalise histograms etc., after the run void finalize() { MSG_DEBUG("Total number Ks0: " << sumKs0_all << endl << "Sum of weights: " << sumOfWeights() << endl - << "Weight Ks0 (2.5 < y < 3.0): " << sumKs0_30 << endl - << "Weight Ks0 (3.0 < y < 3.5): " << sumKs0_35 << endl - << "Weight Ks0 (3.5 < y < 4.0): " << sumKs0_40 << endl + << "Weight Ks0 (2.5 < y < 3.0): " << sumKs0_30 ->sumW()<< endl + << "Weight Ks0 (3.0 < y < 3.5): " << sumKs0_35->sumW() << endl + << "Weight Ks0 (3.5 < y < 4.0): " << sumKs0_40->sumW() << endl << "Nb. unprompt Ks0 [null mother]: " << sumKs0_badnull << endl << "Nb. unprompt Ks0 [mother lifetime exceeded]: " << sumKs0_badlft << endl << "Nb. Ks0 (y > 4.0): " << sumKs0_outup << endl << "Nb. Ks0 (y < 2.5): " << sumKs0_outdwn << endl << "Nb. Ks0 (pT < " << (MIN_PT/MeV) << " MeV/c): " << sum_low_pt_loss << endl << "Nb. Ks0 (pT > 1.6 GeV/c): " << sum_high_pt_loss << endl << "Cross-section [mb]: " << crossSection()/millibarn << endl << "Nb. 
events: " << numEvents()); // Compute cross-section; multiply by bin width for correct scaling // cross-section given by Rivet in pb double xsection_factor = crossSection()/sumOfWeights(); // Multiply bin width for correct scaling, xsection in mub scale(_h_K0s_pt_30, 0.2*xsection_factor/microbarn); scale(_h_K0s_pt_35, 0.2*xsection_factor/microbarn); scale(_h_K0s_pt_40, 0.2*xsection_factor/microbarn); // Divide by dy (rapidity window width), xsection in mb scale(_h_K0s_pt_y_30, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_35, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_40, xsection_factor/0.5/millibarn); scale(_h_K0s_pt_y_all, xsection_factor/1.5/millibarn); } //@} private: /// Get particle lifetime from hardcoded data double getLifeTime(int pid) { double lft = -1.0; if (pid < 0) pid = - pid; // Correct Pythia6 PIDs for f0(980), f0(1370) mesons if (pid == 10331) pid = 30221; if (pid == 10221) pid = 9010221; map::iterator pPartLft = partLftMap.find(pid); // search stable particle list if (pPartLft == partLftMap.end()) { if (pid <= 100 || pid == 990) return 0.0; for ( auto id : stablePDGIds ) { if (pid == id) { lft = 0.0; break; } } } else { lft = (*pPartLft).second; } if (lft < 0.0) MSG_ERROR("Could not determine lifetime for particle with PID " << pid << "... This K_s^0 will be considered unprompt!"); return lft; } ConstGenParticlePtr getLongestLivedAncestor(const Particle& p, double& lifeTime) { ConstGenParticlePtr ret = nullptr; lifeTime = 1.; if (p.genParticle() == nullptr) return nullptr; ConstGenParticlePtr pmother = p.genParticle(); double longest_ctau = 0.; double mother_ctau; int mother_pid; ConstGenVertexPtr ivertex = pmother->production_vertex(); while (ivertex) { vector inparts = HepMCUtils::particles(ivertex, Relatives::PARENTS); if (inparts.size() < 1) {ret = nullptr; break;} // error: should never happen! 
pmother = inparts.at(0); // first mother particle mother_pid = pmother->pdg_id(); ivertex = pmother->production_vertex(); // get next vertex if ( (mother_pid == 2212) || (mother_pid <= 100) ) { if (ret == nullptr) ret = pmother; continue; } mother_ctau = getLifeTime(mother_pid); if (mother_ctau < 0.) { ret= nullptr; break; } // error:should never happen! if (mother_ctau > longest_ctau) { longest_ctau = mother_ctau; ret = pmother; } } if (ret) lifeTime = longest_ctau * c_light; return ret; } // Fill the PDG Id to Lifetime[seconds] map // Data was extract from LHCb Particle Table using ParticleSvc bool fillMap(map &m) { m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16; m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16; m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26; m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12; m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24; m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08; m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24; m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 3.91793E-24; m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24; m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23; m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21; m[333] = 1.545099E-22; m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12; m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13; m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19; m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22; m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23; m[521] = 
1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12; m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13; m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24; m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24; m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24; m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24; m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24; m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24; m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24; m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24; m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24; m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24; m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10; m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23; m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22; m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14; m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19; m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19; m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19; m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12; m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12; m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24; m[10213] = 4.635297E-24; 
m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24; m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24; m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24; m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24; m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23; m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23; m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24; m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24; m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24; m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24; m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23; m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24; m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24; m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13312] = 4.135667E-22; m[13314] = 2.742551E-23; m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24; m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24; m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24; m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22; m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24; m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24; m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24; m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24; m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24; m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 
4.388081E-24; m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24; m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24; m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24; m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24; m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24; m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24; m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24; m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24; m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24; m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24; m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24; m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24; m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20; m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24; m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24; m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24; m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; m[9020221] = 8.093281E-23; m[9020443] = 1.061633E-23; m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24; m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24; m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21; return true; } /// @name Histograms //@{ Histo1DPtr _h_K0s_pt_y_30; // histogram for 2.5 < y < 3.0 (d2sigma) Histo1DPtr _h_K0s_pt_y_35; // histogram for 3.0 < y < 3.5 (d2sigma) Histo1DPtr _h_K0s_pt_y_40; // histogram for 3.5 < y < 4.0 (d2sigma) Histo1DPtr _h_K0s_pt_30; // histogram for 2.5 < y < 3.0 (sigma) Histo1DPtr _h_K0s_pt_35; // histogram for 3.0 < y < 3.5 (sigma) Histo1DPtr _h_K0s_pt_40; // histogram for 3.5 < y < 4.0 (sigma) 
Histo1DPtr _h_K0s_pt_y_all; // histogram for 2.5 < y < 4.0 (d2sigma) CounterPtr sumKs0_30; // Sum of weights 2.5 < y < 3.0 CounterPtr sumKs0_35; // Sum of weights 3.0 < y < 3.5 CounterPtr sumKs0_40; // Sum of weights 3.5 < y < 4.0 // Various counters mainly for debugging and comparisons between different generators size_t sumKs0_badnull; // Nb of particles for which mother could not be identified size_t sumKs0_badlft; // Nb of mesons with long lived mothers size_t sumKs0_all; // Nb of all Ks0 generated size_t sumKs0_outup; // Nb of mesons with y > 4.0 size_t sumKs0_outdwn; // Nb of mesons with y < 2.5 size_t sum_low_pt_loss; // Nb of mesons with very low pT (indicates when units are mixed-up) size_t sum_high_pt_loss; // Nb of mesons with pT > 1.6 GeV/c // Map between PDG id and particle lifetimes in seconds std::map partLftMap; // Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable) static const array stablePDGIds; //@} }; // Actual initialization according to ISO C++ requirements - const array LHCB_2010_S8758301::stablePDGIds{{ +const array LHCB_2010_S8758301::stablePDGIds = { 311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303, 4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414, 4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324, 5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534, 5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112, 12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343, 30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321, 100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555, 120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002, 1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015, 1000016, 1000021, 1000022, 1000023, 
1000024, 1000025, 1000035, 1000037, 1000039, 2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013, 2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223, 3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001, 4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023, 9900024, 9900041, 9900042}; // Hook for the plugin system DECLARE_RIVET_PLUGIN(LHCB_2010_S8758301); } diff --git a/analyses/pluginLHCf/LHCF_2015_I1351909.cc b/analyses/pluginLHCf/LHCF_2015_I1351909.cc --- a/analyses/pluginLHCf/LHCF_2015_I1351909.cc +++ b/analyses/pluginLHCf/LHCF_2015_I1351909.cc @@ -1,303 +1,303 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief Add a short analysis description here class LHCF_2015_I1351909 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(LHCF_2015_I1351909); static constexpr bool lhcf_like = true; static constexpr int ndecay = 1; static constexpr int nbeam = 2; static constexpr double D1_begin = 82000.; //mm 60000.; //mm static constexpr double D1_end = 82000; //mm 90000.; //mm static constexpr double IPtoLHCf = 141050.; //mm /// @name Analysis methods bool isParticleFromCollision(const Particle& p, const Particles& parents, const Beam& beams) const { bool beam[nbeam]={false}; if (parents.size()==nbeam) { for ( int ipar=0; ipar < nbeam; ++ipar ) { if ( parents[ipar].genParticle() == beams.beams().first.genParticle() || parents[ipar].genParticle() == beams.beams().second.genParticle() ) beam[ipar] = true; } if(beam[0] && beam[1]) return true; } return false; } bool isParticleFromDecay(const Particle p, const Particles& parents) const { return (parents.size() == ndecay); } bool isDeviated(Particle p, Particle parent) { //Select/Remove particles decayed between IP and LHCf ConstGenVertexPtr pv = p.genParticle()->production_vertex(); assert(pv != 
nullptr); const double decay_vertex = pv->position().z()/mm; const double parent_charge = PID::charge(parent.pid()); const double descendant_charge = PID::charge(p.pid()); if(parent_charge == 0) { //Particles produced by neutral parent decay if(descendant_charge == 0) { return false; } else { if(decay_vertex >= D1_end) return false; else return true; //Remove charged descendants produced from decay before end of D1 } } else { //Particles produced by charged parent decay if(decay_vertex <= D1_begin) { if(descendant_charge == 0) return false; else return true; //Remove charged descendants produced from decay before end of D1 } else { return true; //Remove particles produced by charged parent decay after begin of D1 } } return false; } bool isSameParticle(Particle p1, Particle p2) { if(p1.pid() == p2.pid() && mom(p1).t() == mom(p2).t() && mom(p1).x() == mom(p2).x() && mom(p1).y() == mom(p2).y() && mom(p1).z() == mom(p2).z()) return true; else return false; } bool isAlreadyProcessed(Particle p, vector list) { for(unsigned int ipar=0; iparproduction_vertex(); const double x0 = pv->position().x()/mm; const double y0 = pv->position().y()/mm; const double z0 = pv->position().z()/mm; const double px = p.px()/MeV; const double py = p.py()/MeV; const double pz = abs(p.pz()/MeV); const double dist_to_lhcf = IPtoLHCf - z0; const double x1 = x0 + (dist_to_lhcf * px/pz); const double y1 = y0 + (dist_to_lhcf * py/pz); const double r = sqrt(pow(x1, 2.)+pow(y1, 2.)); const double theta = atan(abs(r / IPtoLHCf)); const double pseudorapidity = - log (tan (theta/2.) 
); return pseudorapidity; } /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // declare(FinalState("FS"); - addProjection(FinalState(), "FS"); - addProjection(Beam(), "Beams"); + declare(FinalState(), "FS"); + declare(Beam(), "Beams"); // Book histograms book(_h_n_en_eta1, 1, 1, 1); book(_h_n_en_eta2, 1, 1, 2); book(_h_n_en_eta3, 1, 1, 3); } /// Perform the per-event analysis void analyze(const Event& event) { const FinalState &fs = applyProjection (event, "FS"); Particles fs_particles = fs.particles(); const Beam & beams = applyProjection (event, "Beam"); vector processed_parents; processed_parents.clear(); for (Particle& p: fs_particles ) { if(p.pz()/GeV<0.) continue; double eta = 0.; double en = 0.; if(lhcf_like) { //====================================================================== //========== LHCf-like analysis ======================================== //====================================================================== vector parents = p.parents(); if(isParticleFromCollision(p, parents, beams)) { //Particles directly produced in collisions if(!PID::isHadron(p.pid())) continue; //Remove non-hadron particles if(PID::charge(p.pid()) != 0) continue; //Remove charged particles eta = p.eta(); en = p.E()/GeV; } else if(isParticleFromDecay(p, parents)) { //Particles produced from decay ConstGenVertexPtr pv = p.genParticle()->production_vertex(); assert(pv != nullptr); const double decay_vertex = pv->position().z()/mm; Particle parent = parents[0]; if(decay_vertex < IPtoLHCf) { //If decay happens before LHCf we consider descendants if(!PID::isHadron(p.pid())) continue; //Remove non-hadron descendants if(isDeviated(p, parent)) continue; //Remove descendants deviated by D1 eta = RecomputeEta(p); en = p.E()/GeV; } else {//If decay happens after LHCf we consider parents vector ancestors; ancestors.clear(); int ngeneration=0; bool isValid=true; bool isEnded=false; while(!isEnded) //Loop over all 
generations in the decay { vector temp_part; temp_part.clear(); if(ngeneration==0) { parent = parents[0]; temp_part = parent.parents(); } else { parent = ancestors[0]; temp_part = parent.parents(); } ancestors.clear(); ancestors = temp_part; Particle ancestor = ancestors[0]; if(isParticleFromCollision(parent, ancestors, beams)) { //if we found first particles produced in collisions we consider them isEnded=true; if(!PID::isHadron(parent.pid())) isValid=false; //Remove non-hadron ancestors/parents if(PID::charge(parent.pid()) != 0) isValid=false; //Remove charged ancestors/parents if(isAlreadyProcessed(parent, processed_parents)) isValid=false; //Remove already processed ancestors/parents when looping other descendants else processed_parents.push_back(parent); //Fill ancestors/parents in the list eta = parent.eta(); en = parent.E()/GeV; } else if (isParticleFromDecay(parent, ancestors)) { //if we found first particles produced entering LHCf we consider them ConstGenVertexPtr pv_prev = parent.genParticle()->production_vertex(); assert(pv_prev != NULL); const double previous_decay_vertex = pv_prev->position().z()/mm; if(previous_decay_vertex < IPtoLHCf) { isEnded=true; if(!PID::isHadron(parent.pid())) isValid=false; //Remove non-hadron ancestors/parents if(isDeviated(parent, ancestor)) isValid=false; //Remove ancestors/parents deviated by D1 if(isAlreadyProcessed(parent, processed_parents)) isValid=false; //Remove already processed ancestors/parents when looping other descendants else processed_parents.push_back(parent); //Fill ancestors/parents in the list eta = RecomputeEta(parent); en = parent.E()/GeV; } } else { //This condition should never happen cout << "Looping over particles generation ended without match : Exit..." << endl; exit(EXIT_FAILURE); } ++ngeneration; } if(!isValid) continue; } } else { //This condition should never happen cout << "Particle seems not to be produced in collision or decay : Exit..." 
<< endl; exit(EXIT_FAILURE); } } else { //====================================================================== //========== Only neutrons at IP ======================================= //====================================================================== vector parents = p.parents(); //if(isParticleFromCollision(p, parents)) { //Particles directly produced in collisions if(p.pid() != 2112 ) continue; eta = p.eta(); en = p.E()/GeV; //} } // Fill histograms if( eta > 10.76 ){ _h_n_en_eta1->fill( en ); }else if(eta > 8.99 && eta < 9.22){ _h_n_en_eta2->fill( en ); }else if(eta > 8.81 && eta < 8.99){ _h_n_en_eta3->fill( en ); } } } /// Normalise histograms etc., after the run void finalize() { scale(_h_n_en_eta1, crossSection()/millibarn/sumOfWeights()); // norm to cross section scale(_h_n_en_eta2, crossSection()/millibarn/sumOfWeights()); // norm to cross section scale(_h_n_en_eta3, crossSection()/millibarn/sumOfWeights()); // norm to cross section } //@} private: /// @name Histograms //@{ Histo1DPtr _h_n_en_eta1; Histo1DPtr _h_n_en_eta2; Histo1DPtr _h_n_en_eta3; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(LHCF_2015_I1351909); } diff --git a/analyses/pluginMC/MC_D_Dalitz.cc b/analyses/pluginMC/MC_D_Dalitz.cc --- a/analyses/pluginMC/MC_D_Dalitz.cc +++ b/analyses/pluginMC/MC_D_Dalitz.cc @@ -1,272 +1,271 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_D_Dalitz : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_D_Dalitz); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); // Book histograms - _h_plus1 = bookHisto1D("h_plus1" ,200,0.,3. 
); - _h_minus1 = bookHisto1D("h_minus1" ,200,0.,3.2 ); - _h_pipi1 = bookHisto1D("h_pipi1" ,200,0.,2. ); - _h_minus2 = bookHisto1D("h_minus2" ,200,0.,3.2 ); - _h_neutral2 = bookHisto1D("h_neutral2",200,0.,3.2 ); - _h_pipi2 = bookHisto1D("h_pipi2" ,200,0.,2. ); - _h_Kpilow3 = bookHisto1D("h_Kpilow3" ,200,0.,2. ); - _h_Kpihigh3 = bookHisto1D("h_Kpihigh3" ,200,0.,3.2 ); - _h_Kpiall3 = bookHisto1D("h_Kpiall3" ,200,0.,3. ); - _h_pipi3 = bookHisto1D("h_pipi3" ,200,0.,2. ); - _h_Kpip4 = bookHisto1D("h_Kpip4" ,200,0.,3.2 ); - _h_pipi4 = bookHisto1D("h_pipi4" ,200,0.,2. ); - _h_Kpi04 = bookHisto1D("h_Kpi04" ,200,0.,3.2); - _h_kppim5 = bookHisto1D("h_kppim5" ,200,0.,3. ); - _h_kppip5 = bookHisto1D("h_kppip5" ,200,0.,3.1 ); - _h_pippim5 = bookHisto1D("h_pippim5" ,200,0.,2. ); - _h_kppim6 = bookHisto1D("h_kppim6" ,200,0.,3.5); - _h_kppip6 = bookHisto1D("h_kppip6" ,200,0.,3.5); - _h_pippim6 = bookHisto1D("h_pippim6" ,200,0.,2.5); - _dalitz1 = bookHisto2D("dalitz1" ,50,0.3,3.2,50,0.3,3.2); - _dalitz2 = bookHisto2D("dalitz2" ,50,0.3,3. ,50,0.3,3. ); - _dalitz3 = bookHisto2D("dalitz3" ,50,0.3,2. ,50,0.07,2. ); - _dalitz4 = bookHisto2D("dalitz4" ,50,0.3,3.1 ,50,0.07,2. ); - _dalitz5 = bookHisto2D("dalitz5" ,50,0.,3. ,50,0.,2. ); - _dalitz6 = bookHisto2D("dalitz6" ,50,0.3,3.5,50,0.07,2.5); + book(_h_plus1, "h_plus1" ,200,0.,3. ); + book(_h_minus1, "h_minus1" ,200,0.,3.2 ); + book(_h_pipi1, "h_pipi1" ,200,0.,2. ); + book(_h_minus2, "h_minus2" ,200,0.,3.2 ); + book(_h_neutral2, "h_neutral2",200,0.,3.2 ); + book(_h_pipi2, "h_pipi2" ,200,0.,2. ); + book(_h_Kpilow3, "h_Kpilow3" ,200,0.,2. ); + book(_h_Kpihigh3, "h_Kpihigh3" ,200,0.,3.2 ); + book(_h_Kpiall3, "h_Kpiall3" ,200,0.,3. ); + book(_h_pipi3, "h_pipi3" ,200,0.,2. ); + book(_h_Kpip4, "h_Kpip4" ,200,0.,3.2 ); + book(_h_pipi4, "h_pipi4" ,200,0.,2. ); + book(_h_Kpi04, "h_Kpi04" ,200,0.,3.2); + book(_h_kppim5, "h_kppim5" ,200,0.,3. ); + book(_h_kppip5, "h_kppip5" ,200,0.,3.1 ); + book(_h_pippim5, "h_pippim5" ,200,0.,2. 
); + book(_h_kppim6, "h_kppim6" ,200,0.,3.5); + book(_h_kppip6, "h_kppip6" ,200,0.,3.5); + book(_h_pippim6, "h_pippim6" ,200,0.,2.5); + book(_dalitz1, "dalitz1" ,50,0.3,3.2,50,0.3,3.2); + book(_dalitz2, "dalitz2" ,50,0.3,3. ,50,0.3,3. ); + book(_dalitz3, "dalitz3" ,50,0.3,2. ,50,0.07,2. ); + book(_dalitz4, "dalitz4" ,50,0.3,3.1 ,50,0.07,2. ); + book(_dalitz5, "dalitz5" ,50,0.,3. ,50,0.,2. ); + book(_dalitz6, "dalitz6" ,50,0.3,3.5,50,0.07,2.5); } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles & pip , Particles & pim , Particles & pi0 , Particles & Kp , Particles & Km , Particles & K0) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if ( id == PID::KPLUS ) { Kp.push_back(p); ++nstable; } else if (id == PID::KMINUS ) { Km.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::PI0) { pi0.push_back(p); ++nstable; } else if (id == PID::K0S||id == PID::K0L) { K0.push_back(p); ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p, nstable, pip, pim, pi0, Kp , Km, K0); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { - double weight = event.weight(); for(const Particle& meson : apply(event, "UFS"). 
particles(Cuts::abspid== 411 ||Cuts::abspid== 421 ||Cuts::abspid== 431 )) { unsigned int nstable(0); Particles pip, pim, pi0, Kp , Km, K0; findDecayProducts(meson, nstable, pip, pim, pi0, Kp , Km, K0); - if(meson.pdgId()<0) { + if(meson.pid()<0) { swap(pim,pip); swap(Kp,Km); } - if(abs(meson.pdgId())==421) { + if(abs(meson.pid())==421) { if(pim.size()==1&&pip.size()==1&&K0.size()==1) { double mminus = (pim[0].momentum()+K0[0].momentum() ).mass2(); double mplus = (pip[0].momentum()+K0[0].momentum() ).mass2(); double mpipi = (pip[0].momentum()+pim[0].momentum()).mass2(); - _h_plus1 ->fill(mplus,weight); - _h_minus1 ->fill(mminus,weight); - _h_pipi1 ->fill(mpipi,weight); - _dalitz1 ->fill(mplus,mminus,weight); + _h_plus1 ->fill(mplus); + _h_minus1 ->fill(mminus); + _h_pipi1 ->fill(mpipi); + _dalitz1 ->fill(mplus,mminus); } else if (pip.size()==1&&Km.size()==1&&pi0.size()==1) { double mneut = (Km[0].momentum()+pip[0].momentum()).mass2(); double mminus = (Km[0].momentum()+pi0[0].momentum()).mass2(); double mpipi = (pip[0].momentum()+pi0[0].momentum()).mass2(); - _h_neutral2 ->fill(mneut,weight); - _h_minus2 ->fill(mminus,weight); - _h_pipi2 ->fill(mpipi,weight); - _dalitz2->fill(mminus,mneut,weight); + _h_neutral2 ->fill(mneut); + _h_minus2 ->fill(mminus); + _h_pipi2 ->fill(mpipi); + _dalitz2->fill(mminus,mneut); } } - else if(abs(meson.pdgId())==411) { + else if(abs(meson.pid())==411) { if(pip.size()==2&&Km.size()==1) { double mplus = (Km[0].momentum() +pip[0].momentum()).mass2(); double mminus = (Km[0].momentum() +pip[1].momentum()).mass2(); double mpipi = (pip[0].momentum()+pip[1].momentum()).mass2(); if(mplusfill( mminus,weight); - _h_Kpihigh3->fill( mplus,weight); - _h_Kpiall3 ->fill( mminus,weight); - _h_Kpiall3 ->fill( mplus,weight); - _h_pipi3 ->fill( mpipi,weight); - _dalitz3->fill(mminus,mpipi, weight); + _h_Kpilow3 ->fill( mminus); + _h_Kpihigh3->fill( mplus); + _h_Kpiall3 ->fill( mminus); + _h_Kpiall3 ->fill( mplus); + _h_pipi3 ->fill( mpipi); + 
_dalitz3->fill(mminus,mpipi); } else if (pip.size()==1&&pi0.size()==1&&K0.size()==1) { double mminus = (K0[0].momentum()+pip[0].momentum()).mass2(); double mplus = (K0[0].momentum()+pi0[0].momentum()).mass2(); double mpipi = (pip[0].momentum()+pi0[0].momentum()).mass2(); - _h_Kpip4 ->fill( mminus, weight); - _h_pipi4 ->fill( mpipi , weight); - _h_Kpi04 ->fill( mplus , weight); - _dalitz4->fill(mplus,mpipi, weight); + _h_Kpip4 ->fill( mminus); + _h_pipi4 ->fill( mpipi ); + _h_Kpi04 ->fill( mplus ); + _dalitz4->fill(mplus,mpipi); } else if (pim.size()==1&&Kp.size()==1&&pip.size()==1) { double mplus = (Kp [0].momentum()+pip[0].momentum()).mass2(); double mminus = (Kp [0].momentum()+pim[0].momentum()).mass2(); double mpipi = (pip[0].momentum()+pim[0].momentum()).mass2(); - _h_kppim5 ->fill(mminus,weight); - _h_kppip5 ->fill(mplus ,weight); - _h_pippim5->fill(mpipi ,weight); - _dalitz5->fill(mminus,mpipi, weight); + _h_kppim5 ->fill(mminus); + _h_kppip5 ->fill(mplus ); + _h_pippim5->fill(mpipi ); + _dalitz5->fill(mminus,mpipi); } } - else if(abs(meson.pdgId())==431) { + else if(abs(meson.pid())==431) { if (pim.size()==1&&Kp.size()==1&&pip.size()==1) { double mplus = (Kp [0].momentum()+pip[0].momentum()).mass2(); double mminus = (Kp [0].momentum()+pim[0].momentum()).mass2(); double mpipi = (pip[0].momentum()+pim[0].momentum()).mass2(); - _h_kppim6 ->fill(mminus,weight); - _h_kppip6 ->fill(mplus ,weight); - _h_pippim6->fill(mpipi ,weight); - _dalitz6->fill(mminus,mpipi, weight); + _h_kppim6 ->fill(mminus); + _h_kppip6 ->fill(mplus ); + _h_pippim6->fill(mpipi ); + _dalitz6->fill(mminus,mpipi); } } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_plus1); normalize(_h_minus1); normalize(_h_pipi1); normalize(_dalitz1); normalize(_h_minus2); normalize(_h_pipi2); normalize(_h_neutral2); normalize(_dalitz2); normalize(_h_Kpilow3); normalize(_h_Kpihigh3); normalize(_h_Kpiall3); normalize(_h_pipi3); normalize(_dalitz3); normalize(_h_Kpip4); 
normalize(_h_pipi4); normalize(_h_Kpi04); normalize(_dalitz4); normalize(_h_kppim5); normalize(_h_kppip5); normalize(_h_pippim5); normalize(_dalitz5); normalize(_h_kppim6); normalize(_h_kppip6); normalize(_h_pippim6); normalize(_dalitz6); } //@} /// @name Histograms //@{ // Histograms for D^0\to \bar{K}^0\pi^+\pi^- //m^2_+ Histo1DPtr _h_plus1; //m^2_+ Histo1DPtr _h_minus1; //m^2_{\pi\pi} Histo1DPtr _h_pipi1; // Dalitz plot Histo2DPtr _dalitz1; // Histograms for D^0\to K^-\pi^+\pi^0 // Histogram for the K^-\pi^+ mass Histo1DPtr _h_minus2; // Histogram for the \pi^+\pi^0 mass Histo1DPtr _h_pipi2; // Histogram for the K^-\pi^0 mass Histo1DPtr _h_neutral2; // Dalitz plot Histo2DPtr _dalitz2; // Histograms for D^+\to K^-\pi^+\pi^+ // Histogram for K^-\pi^+ low Histo1DPtr _h_Kpilow3; // Histogram for K^-\pi^+ high Histo1DPtr _h_Kpihigh3; // Histogram for K^-\pi^+ all Histo1DPtr _h_Kpiall3; // Histogram for \pi^+\pi^- Histo1DPtr _h_pipi3; // Dalitz plot Histo2DPtr _dalitz3; // Histograms for D^+\to\bar{K}^0\pi^+\pi^0 // Histogram for the \bar{K}^0\pi^+ mass Histo1DPtr _h_Kpip4; // Histogram for the \pi^+\pi^0 mass Histo1DPtr _h_pipi4; // Histogram for the \bar{K}^0\pi^0 mass Histo1DPtr _h_Kpi04; // Dalitz plot Histo2DPtr _dalitz4; // Histograms for D^+\to K^+\pi^-\pi^+ // Histogram for K^+\pi^- Histo1DPtr _h_kppim5; // Histogram for K^+\pi^+ Histo1DPtr _h_kppip5; // Histogram for \pi^+\pi^- Histo1DPtr _h_pippim5; // Dalitz plot Histo2DPtr _dalitz5; // Histograms for D_s^+\to K^+\pi^-\pi^+ // Histogram for K^+\pi^- Histo1DPtr _h_kppim6; // Histogram for K^+\pi^+ Histo1DPtr _h_kppip6; // Histogram for \pi^+\pi^- Histo1DPtr _h_pippim6; // Dalitz plot Histo2DPtr _dalitz6; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_D_Dalitz); } diff --git a/analyses/pluginMC/MC_Eta_Decay.cc b/analyses/pluginMC/MC_Eta_Decay.cc --- a/analyses/pluginMC/MC_Eta_Decay.cc +++ b/analyses/pluginMC/MC_Eta_Decay.cc @@ -1,247 +1,247 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" 
#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_Eta_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_Eta_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); // Book histograms double meta[2]={547.45, 957.78}; for(unsigned int ix=0;ix<2;++ix) { - ostringstream title; title << "_" << ix; + std::ostringstream title; title << "_" << ix; _mgammagamma .push_back(bookHisto1D("mgammagamma" +title.str(),200,0.,meta[ix]) ); _mpi0gamma .push_back(bookHisto1D("mpi0gamma" +title.str(),200,0.,meta[ix])) ; _mpipgamma .push_back(bookHisto1D("mpipgamma" +title.str(),200,0.,meta[ix])); _mpimgamma .push_back(bookHisto1D("mpimgamma" +title.str(),200,0.,meta[ix])); _photonenergy.push_back(bookHisto1D("photonenergy"+title.str(),200,0.,meta[ix])); _mpippim .push_back(bookHisto1D("mpippim" +title.str(),200,0.,meta[ix])); _dpippim .push_back(bookHisto1D("dpippim" +title.str(),200,200.,meta[ix])); _dpi0pi0 .push_back(bookHisto1D("dpi0pi0" +title.str(),200,200.,meta[ix])); _dpi0pip .push_back(bookHisto1D("dpi0pip" +title.str(),200,200.,meta[ix])); _dpi0pim .push_back(bookHisto1D("dpi0pim" +title.str(),200,200.,meta[ix])); } _dpi0pi0.push_back(bookHisto1D("dpi0pi0_2",200,200.,500. )); _dpippim.push_back(bookHisto1D("dpippim_2",200,200.,500. 
)); _dpipeta=bookHisto1D("dpipeta",200,500.,meta[1]) ; _dpimeta=bookHisto1D("dpimeta",200,500.,meta[1]) ; _dpi0eta=bookHisto1D("dpi0eta",200,500.,meta[1]) ; } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& pip, Particles& pim, Particles& pi0, Particles& eta, Particles& gamma) { for(const Particle & p : mother.children()) { int id = p.pdgId(); if ( id == PID::ETA ) { eta.push_back(p); ++nstable; } else if ( id == PID::PHOTON ) { gamma.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::PI0 ) { pi0.push_back(p); ++nstable; } else if (id == PID::K0S || id == PID::K0L || id == PID::KPLUS || id == PID::KMINUS) ++nstable; else if ( !p.children().empty() ) { findDecayProducts(p,nstable,pip,pim,pi0,eta,gamma); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { double weight = event.weight(); // Loop over f_1 mesons for(const Particle& meson : apply(event, "UFS"). particles(Cuts::pid==221||Cuts::pid==331)) { unsigned int nstable(0); Particles pip, pim, pi0, eta, gamma; findDecayProducts(meson,nstable,pip, pim, pi0, eta, gamma); unsigned int imeson = meson.pdgId()==221 ? 
0 : 1; // pi0 gamma gamma if(nstable==3 && pi0.size()==1 && gamma.size()==2) { _mgammagamma[imeson]->fill((gamma[0].momentum()+gamma[1].momentum()).mass()/MeV,weight); _mpi0gamma [imeson]->fill(( pi0[0].momentum()+gamma[0].momentum()).mass()/MeV,weight); _mpi0gamma [imeson]->fill(( pi0[0].momentum()+gamma[1].momentum()).mass()/MeV,weight); } // pi+pi-gamma analysis else if(nstable==3 && pip.size()==1 && pim.size()==1 && gamma.size()==1) { FourMomentum ptemp = pip[0].momentum()+pim[0].momentum(); double mpipi = ptemp.mass(); _mpippim[imeson]->fill(mpipi/MeV,weight); double egamma = 0.5*(meson.mass()*meson.mass()-mpipi*mpipi)/meson.mass(); _photonenergy[imeson]->fill(egamma/MeV,weight); _mpipgamma[imeson]->fill((pip[0].momentum()+gamma[0].momentum()).mass()/MeV,weight); _mpimgamma[imeson]->fill((pim[0].momentum()+gamma[0].momentum()).mass()/MeV,weight); } else if(nstable==3&& pi0.size()==3) { _dpi0pi0[imeson]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()/MeV,weight); _dpi0pi0[imeson]->fill((pi0[0].momentum()+pi0[2].momentum()).mass()/MeV,weight); _dpi0pi0[imeson]->fill((pi0[1].momentum()+pi0[2].momentum()).mass()/MeV,weight); } else if(nstable==3&& pip.size()==1&&pim.size()==1&&pi0.size()==1) { _dpi0pip[imeson]->fill((pi0[0].momentum()+pip[0].momentum()).mass()/MeV,weight); _dpi0pim[imeson]->fill((pi0[0].momentum()+pim[0].momentum()).mass()/MeV,weight); _dpippim[imeson]->fill((pip[0].momentum()+pim[0].momentum()).mass()/MeV,weight); } else if(nstable==3&& pi0.size()==2&&eta.size()==1) { _dpi0pi0[2]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()/MeV,weight); _dpi0eta ->fill((pi0[0].momentum()+eta[0].momentum()).mass()/MeV,weight); _dpi0eta ->fill((pi0[1].momentum()+eta[0].momentum()).mass()/MeV,weight); } else if(nstable==3&& pip.size()==1&&pim.size()==1&&eta.size()==1) { _dpippim[2]->fill((pip[0].momentum()+pim[0].momentum()).mass()/MeV,weight); _dpipeta ->fill((pip[0].momentum()+eta[0].momentum()).mass()/MeV,weight); _dpimeta 
->fill((pim[0].momentum()+eta[0].momentum()).mass()/MeV,weight); } } } /// Normalise histograms etc., after the run void finalize() { // normalize to unity for(unsigned int ix=0;ix<2;++ix) { normalize(_mgammagamma[ix]); normalize(_mpi0gamma[ix]); normalize(_mpipgamma[ix]); normalize(_mpimgamma[ix]); normalize(_mpippim[ix]); normalize(_photonenergy[ix]); normalize(_dpippim[ix]); normalize(_dpi0pi0[ix]); normalize(_dpi0pip[ix]); normalize(_dpi0pim[ix]); } normalize(_dpi0pi0[2]); normalize(_dpippim[2]); normalize(_dpipeta); normalize(_dpimeta); normalize(_dpi0eta); } //@} /** * Histograms for the decay \f$\eta\to\pi^0\gamma\gamma\f$ */ //@{ /** * Histogram for the mass of \f$\gamma\gamma\f$ */ vector _mgammagamma; /** * Histogrma for the mass of \f$\pi^0\gamma\f$ */ vector _mpi0gamma; //@} /** * Histograms for the decay \f$\eta\to\pi^+\pi^-\gamma\f$ */ //@{ /** * Histogram for the mass of \f$\pi^+\gamma\f$ */ vector _mpipgamma; /** * Histogram for the mass of \f$\pi^-\gamma\f$ */ vector _mpimgamma; /** * Histogram for the mass of \f$\pi^+\pi^-\f$ */ vector _mpippim; /** * Histogram for the photon energy */ vector _photonenergy; //@} /** * Histograms for the decay \f$\eta\pi\pi\pi\f$ and \f$\eta'\to\eta\pi\pi\f$. 
*/ //@{ /** * Histogram for the mass of \f$\pi^+\pi^-\f$ */ vector _dpippim; /** * Histogram for the mass of \f$\pi^0\pi^0\f$ */ vector _dpi0pi0; /** * Histogram for the mass of \f$\pi^0\pi^+\f$ */ vector _dpi0pip; /** * Histogram for the mass of \f$\pi^0\pi^-\f$ */ vector _dpi0pim; /** * Histogram for the mass of \f$\pi^+\eta\f$ */ Histo1DPtr _dpipeta; /** * Histogram for the mass of \f$\pi^-\eta\f$ */ Histo1DPtr _dpimeta; /** * Histogram for the mass of \f$\pi^0\eta\f$ */ Histo1DPtr _dpi0eta; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_Eta_Decay); } diff --git a/analyses/pluginMC/MC_F1_Decay.cc b/analyses/pluginMC/MC_F1_Decay.cc --- a/analyses/pluginMC/MC_F1_Decay.cc +++ b/analyses/pluginMC/MC_F1_Decay.cc @@ -1,185 +1,185 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_F1_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_F1_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(),"UFS"); // eta pi0 pi0 mode - _h_eta0_etapi0 = bookHisto1D("eta0_etapi0" , 70, 0.66, 1.36); - _h_eta0_pi0pi0 = bookHisto1D("eta0_pi0pi0" , 80, 0.2, 1.0); - _h_eta0_etapi0pi0 = bookHisto1D("eta0_etapi0pi0", 70, 1.0, 1.7); + book(_h_eta0_etapi0, "eta0_etapi0" , 70, 0.66, 1.36); + book(_h_eta0_pi0pi0, "eta0_pi0pi0" , 80, 0.2, 1.0); + book(_h_eta0_etapi0pi0, "eta0_etapi0pi0", 70, 1.0, 1.7); // eta pi+pi- mode - _h_eta1_etapip = bookHisto1D("eta1_etapip" , 70, 0.66, 1.36); - _h_eta1_etapim = bookHisto1D("eta1_etapim" , 70, 0.66, 1.36); - _h_eta1_pippim = bookHisto1D("eta1_pippim" , 80, 0.2, 1.0); - _h_eta1_etapippim = bookHisto1D("eta1_etapippim", 70, 1.0, 1.7); + book(_h_eta1_etapip, "eta1_etapip" , 70, 0.66, 1.36); + book(_h_eta1_etapim, "eta1_etapim" , 70, 0.66, 1.36); + book(_h_eta1_pippim, 
"eta1_pippim" , 80, 0.2, 1.0); + book(_h_eta1_etapippim, "eta1_etapippim", 70, 1.0, 1.7); // pi+pi-2pi0 - _h_4pi0_pi0pi0 = bookHisto1D("4pi0_pi0pi0" , 80, 0.2, 1.0); - _h_4pi0_pippi0 = bookHisto1D("4pi0_pippi0" , 80, 0.2, 1.0); - _h_4pi0_pimpi0 = bookHisto1D("4pi0_pimpi0" , 80, 0.2, 1.0); - _h_4pi0_pippim = bookHisto1D("4pi0_pippim" , 80, 0.2, 1.0); - _h_4pi0_pippimpi0 = bookHisto1D("4pi0_pippimpi0",100, 0.4, 1.4); - _h_4pi0_pippi0pi0 = bookHisto1D("4pi0_pippi0pi0",100, 0.4, 1.4); - _h_4pi0_pimpi0pi0 = bookHisto1D("4pi0_pimpi0pi0",100, 0.4, 1.4); - _h_4pi0_4pi = bookHisto1D("4pi0_4pi" , 70, 1.0, 1.7); + book(_h_4pi0_pi0pi0, "4pi0_pi0pi0" , 80, 0.2, 1.0); + book(_h_4pi0_pippi0, "4pi0_pippi0" , 80, 0.2, 1.0); + book(_h_4pi0_pimpi0, "4pi0_pimpi0" , 80, 0.2, 1.0); + book(_h_4pi0_pippim, "4pi0_pippim" , 80, 0.2, 1.0); + book(_h_4pi0_pippimpi0, "4pi0_pippimpi0",100, 0.4, 1.4); + book(_h_4pi0_pippi0pi0, "4pi0_pippi0pi0",100, 0.4, 1.4); + book(_h_4pi0_pimpi0pi0, "4pi0_pimpi0pi0",100, 0.4, 1.4); + book(_h_4pi0_4pi, "4pi0_4pi" , 70, 1.0, 1.7); // 2pi+ 2pi- mode - _h_4pi1_pippip = bookHisto1D("4pi1_pippip" , 80, 0.2, 1.0); - _h_4pi1_pimpim = bookHisto1D("4pi1_pimpim" , 80, 0.2, 1.0); - _h_4pi1_pippim = bookHisto1D("4pi1_pippim" , 80, 0.2, 1.0); - _h_4pi1_pimpimpip = bookHisto1D("4pi1_pimpimpip",100, 0.4, 1.4); - _h_4pi1_pippippim = bookHisto1D("4pi1_pippippim",100, 0.4, 1.4); - _h_4pi1_4pi = bookHisto1D("4pi1_4pi" , 70, 1.0, 1.7); + book(_h_4pi1_pippip, "4pi1_pippip" , 80, 0.2, 1.0); + book(_h_4pi1_pimpim, "4pi1_pimpim" , 80, 0.2, 1.0); + book(_h_4pi1_pippim, "4pi1_pippim" , 80, 0.2, 1.0); + book(_h_4pi1_pimpimpip, "4pi1_pimpimpip",100, 0.4, 1.4); + book(_h_4pi1_pippippim, "4pi1_pippippim",100, 0.4, 1.4); + book(_h_4pi1_4pi, "4pi1_4pi" , 70, 1.0, 1.7); } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& pip, Particles& pim, Particles& pi0, Particles& eta) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); 
if ( id == PID::ETA ) { eta.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::PI0 ) { pi0.push_back(p); ++nstable; } else if (id == PID::K0S || id == PID::K0L || id == PID::KPLUS || id == PID::KMINUS) ++nstable; else if ( !p.children().empty() ) { findDecayProducts(p,nstable,pip,pim,pi0,eta); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { // Loop over f_1 mesons for(const Particle& f1 : apply(event, "UFS").particles(Cuts::abspid==20223)) { unsigned int nstable(0); Particles pip, pim, pi0, eta; findDecayProducts(f1,nstable,pip, pim, pi0, eta); // pi+ pi- pi0 pi0 if(nstable==4 && pip.size()==1 && pim.size()==1 && pi0.size()==2) { _h_4pi0_pi0pi0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),1.); _h_4pi0_pippi0->fill((pip[0].momentum()+pi0[0].momentum()).mass(),1.); _h_4pi0_pippi0->fill((pip[0].momentum()+pi0[1].momentum()).mass(),1.); _h_4pi0_pimpi0->fill((pim[0].momentum()+pi0[0].momentum()).mass(),1.); _h_4pi0_pimpi0->fill((pim[0].momentum()+pi0[1].momentum()).mass(),1.); _h_4pi0_pippim->fill((pip[0].momentum()+pim[0].momentum()).mass(),1.); _h_4pi0_pippimpi0->fill((pip[0].momentum()+pim[0].momentum()+pi0[0].momentum()).mass(),1.); _h_4pi0_pippimpi0->fill((pip[0].momentum()+pim[0].momentum()+pi0[1].momentum()).mass(),1.); _h_4pi0_pippi0pi0->fill((pi0[0].momentum()+pi0[1].momentum()+pip[0].momentum()).mass(),1.); _h_4pi0_pimpi0pi0->fill((pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()).mass(),1.); _h_4pi0_4pi->fill((pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()+pip[0].momentum()).mass(),1.); } else if(nstable==4 && pip.size()==2 && pim.size()==2) { _h_4pi1_pippip ->fill((pip[0].momentum()+pip[1].momentum()).mass(),1.); _h_4pi1_pimpim ->fill((pim[0].momentum()+pim[1].momentum()).mass(),1.); _h_4pi1_pippim ->fill((pip[0].momentum()+pim[0].momentum()).mass(),1.); _h_4pi1_pippim 
->fill((pip[0].momentum()+pim[1].momentum()).mass(),1.); _h_4pi1_pippim ->fill((pip[1].momentum()+pim[0].momentum()).mass(),1.); _h_4pi1_pippim ->fill((pip[1].momentum()+pim[1].momentum()).mass(),1.); _h_4pi1_pimpimpip->fill((pim[0].momentum()+pim[1].momentum()+pip[0].momentum()).mass(),1.); _h_4pi1_pimpimpip->fill((pim[0].momentum()+pim[1].momentum()+pip[1].momentum()).mass(),1.); _h_4pi1_pippippim->fill((pip[0].momentum()+pip[1].momentum()+pim[0].momentum()).mass(),1.); _h_4pi1_pippippim->fill((pip[0].momentum()+pip[1].momentum()+pim[1].momentum()).mass(),1.); _h_4pi1_4pi ->fill((pip[0].momentum()+pip[1].momentum()+ pim[0].momentum()+pim[1].momentum()).mass(),1.); } else if(nstable==3 && eta.size()==1 && pip.size()==1 && pim.size()==1) { _h_eta1_etapip ->fill((eta[0].momentum()+pip[0].momentum()).mass(),1.); _h_eta1_etapim ->fill((eta[0].momentum()+pim[0].momentum()).mass(),1.); _h_eta1_pippim ->fill((pim[0].momentum()+pip[0].momentum()).mass(),1.); _h_eta1_etapippim->fill((eta[0].momentum()+pim[0].momentum()+pip[0].momentum()).mass(),1.); } else if(nstable==3 && eta.size()==1 && pi0.size()==2 ) { _h_eta0_etapi0 ->fill((eta[0].momentum()+pi0[0].momentum()).mass(),1.); _h_eta0_etapi0 ->fill((eta[0].momentum()+pi0[1].momentum()).mass(),1.); _h_eta0_pi0pi0 ->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),1.); _h_eta0_etapi0pi0->fill((eta[0].momentum()+pi0[0].momentum()+pi0[1].momentum()).mass(),1.); } } } /// Normalise histograms etc., after the run void finalize() { normalize(_h_eta0_etapi0 ); normalize(_h_eta0_pi0pi0 ); normalize(_h_eta0_etapi0pi0); normalize(_h_eta1_etapip); normalize(_h_eta1_etapim); normalize(_h_eta1_pippim); normalize(_h_eta1_etapippim); normalize(_h_4pi0_pi0pi0); normalize(_h_4pi0_pippi0); normalize(_h_4pi0_pimpi0); normalize(_h_4pi0_pippim); normalize(_h_4pi0_pippimpi0); normalize(_h_4pi0_pippi0pi0); normalize(_h_4pi0_pimpi0pi0); normalize(_h_4pi0_4pi); normalize(_h_4pi1_pippip); normalize(_h_4pi1_pimpim); normalize(_h_4pi1_pippim); 
normalize(_h_4pi1_pimpimpip); normalize(_h_4pi1_pippippim); normalize(_h_4pi1_4pi); } //@} // @name Histograms //@{ Histo1DPtr _h_eta0_etapi0,_h_eta0_pi0pi0,_h_eta0_etapi0pi0; Histo1DPtr _h_eta1_etapip,_h_eta1_etapim,_h_eta1_pippim,_h_eta1_etapippim; Histo1DPtr _h_4pi0_pi0pi0,_h_4pi0_pippi0,_h_4pi0_pimpi0,_h_4pi0_pippim,_h_4pi0_pippimpi0,_h_4pi0_pippi0pi0, _h_4pi0_pimpi0pi0,_h_4pi0_4pi; Histo1DPtr _h_4pi1_pippip,_h_4pi1_pimpim,_h_4pi1_pippim,_h_4pi1_pimpimpip,_h_4pi1_pippippim,_h_4pi1_4pi; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_F1_Decay); } diff --git a/analyses/pluginMC/MC_Meson_Meson_Leptons_Decay.cc b/analyses/pluginMC/MC_Meson_Meson_Leptons_Decay.cc --- a/analyses/pluginMC/MC_Meson_Meson_Leptons_Decay.cc +++ b/analyses/pluginMC/MC_Meson_Meson_Leptons_Decay.cc @@ -1,241 +1,247 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Tools/ParticleIdUtils.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_Meson_Meson_Leptons_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_Meson_Meson_Leptons_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(),"UFS"); } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& lp, Particles& lm, Particles& scalar, Particles& vector) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if ( id == PID::EMINUS || id == PID::MUON ) { lm.push_back(p); ++nstable; } else if (id == PID::EPLUS || id == PID::ANTIMUON) { lp.push_back(p); ++nstable; } else if (abs(id)%10==1 && PID::isMeson(id)) { scalar.push_back(p); ++nstable; } else if ((abs(id)%10==3 && PID::isMeson(id)) || id==PID::PHOTON ) { vector.push_back(p); ++nstable; } else if ( !p.children().empty() ) { 
findDecayProducts(p,nstable,lp,lm,scalar,vector); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { // loop over unstable particles for(const Particle& iMeson : apply(event, "UFS").particles()) { // only consider scalar/vector mesons - long pid = iMeson.pdgId(); + long pid = iMeson.pid(); if(!PID::isMeson(pid)) continue; if(abs(pid)%10!=3 and abs(pid)%10!=1 ) continue; Particles lp,lm,scalar,vector; unsigned int nstable(0); findDecayProducts(iMeson,nstable,lp,lm,scalar,vector); - if(nstable!=3 || lp.size()!=1 || lm.size()!=1 || lp[0].pdgId()!=-lm[0].pdgId()) continue; + if(nstable!=3 || lp.size()!=1 || lm.size()!=1 || lp[0].pid()!=-lm[0].pid()) continue; if(scalar.size()==1) { // check if we already have this decay unsigned int ix=0; bool found(false); while(!found&&ix<_incomingV.size()) { - if(_incomingV[ix]==pid && _outgoingP[ix]==scalar[0].pdgId() && - _outgoingf_V[ix]==lm[0].pdgId()) { + if(_incomingV[ix]==pid && _outgoingP[ix]==scalar[0].pid() && + _outgoingf_V[ix]==lm[0].pid()) { found=true; } else { ++ix; } } // create a new graph if needed if(!found) { ix=_incomingV.size(); _incomingV.push_back(pid); - _outgoingP.push_back(scalar[0].pdgId()); - _outgoingf_V.push_back(lm[0].pdgId()); - ostringstream title; + _outgoingP.push_back(scalar[0].pid()); + _outgoingf_V.push_back(lm[0].pid()); + std::ostringstream title; title << "h_" << abs(pid); if(pid>0) title << "p"; else title << "m"; - title << "_" << abs(scalar[0].pdgId()); - if(scalar[0].pdgId()>0) title << "p"; + title << "_" << abs(scalar[0].pid()); + if(scalar[0].pid()>0) title << "p"; else title << "m"; - title << "_" << lm[0].pdgId() << "_"; - _mff_V .push_back(bookHisto1D(title.str()+"mff" , 200, 0., iMeson.mass())); - _mPf .push_back(bookHisto1D(title.str()+"mPf" , 200, 0., iMeson.mass())); - _mPfbar.push_back(bookHisto1D(title.str()+"mPfbar", 200, 0., iMeson.mass())); + title << "_" << lm[0].pid() << "_"; + _mff_V .push_back(Histo1DPtr()); + 
book(_mff_V.back(), title.str()+"mff" , 200, 0., iMeson.mass()); + _mPf .push_back(Histo1DPtr()); + book(_mPf.back(), title.str()+"mPf" , 200, 0., iMeson.mass()); + _mPfbar.push_back(Histo1DPtr()); + book(_mPfbar.back(), title.str()+"mPfbar", 200, 0., iMeson.mass()); } // add the results to the histogram - _mff_V [ix]->fill((lm [0].momentum()+lp[0].momentum()).mass(),event.weight()); - _mPf [ix]->fill((scalar[0].momentum()+lm[0].momentum()).mass(),event.weight()); - _mPfbar[ix]->fill((scalar[0].momentum()+lp[0].momentum()).mass(),event.weight()); + _mff_V [ix]->fill((lm [0].momentum()+lp[0].momentum()).mass()); + _mPf [ix]->fill((scalar[0].momentum()+lm[0].momentum()).mass()); + _mPfbar[ix]->fill((scalar[0].momentum()+lp[0].momentum()).mass()); } else if(vector.size()==1) { // check if we already have this decay unsigned int ix=0; bool found(false); while(!found&&ix<_incoming_P.size()) { - if(_incoming_P[ix]==pid && _outgoingV[ix]==vector[0].pdgId() && - _outgoingf_P[ix]==lm[0].pdgId()) { + if(_incoming_P[ix]==pid && _outgoingV[ix]==vector[0].pid() && + _outgoingf_P[ix]==lm[0].pid()) { found=true; } else { ++ix; } } // create a new graph if needed if(!found) { ix=_incoming_P.size(); _incoming_P.push_back(pid); - _outgoingV.push_back(vector[0].pdgId()); - _outgoingf_P.push_back(lm[0].pdgId()); - ostringstream title; + _outgoingV.push_back(vector[0].pid()); + _outgoingf_P.push_back(lm[0].pid()); + std::ostringstream title; title << "h2_" << abs(pid); if(pid>0) title << "p"; else title << "m"; - title << "_" << abs(vector[0].pdgId()); - if(vector[0].pdgId()>0) title << "p"; + title << "_" << abs(vector[0].pid()); + if(vector[0].pid()>0) title << "p"; else title << "m"; - title << "_" << lm[0].pdgId() << "_"; - _mff_P .push_back(bookHisto1D(title.str()+"mff" , 200, 0., iMeson.mass())); - _mVf .push_back(bookHisto1D(title.str()+"mVf" , 200, 0., iMeson.mass())); - _mVfbar.push_back(bookHisto1D(title.str()+"mVfbar", 200, 0., iMeson.mass())); + title << "_" << lm[0].pid() 
<< "_"; + _mff_P .push_back(Histo1DPtr()); + book(_mff_P.back(), title.str()+"mff" , 200, 0., iMeson.mass()); + _mVf .push_back(Histo1DPtr()); + book(_mVf.back(), title.str()+"mVf" , 200, 0., iMeson.mass()); + _mVfbar.push_back(Histo1DPtr()); + book(_mVfbar.back(), title.str()+"mVfbar", 200, 0., iMeson.mass()); } // add the results to the histogram - _mff_P [ix]->fill((lm [0].momentum()+lp[0].momentum()).mass(),event.weight()); - _mVf [ix]->fill((vector[0].momentum()+lm[0].momentum()).mass(),event.weight()); - _mVfbar[ix]->fill((vector[0].momentum()+lp[0].momentum()).mass(),event.weight()); + _mff_P [ix]->fill((lm [0].momentum()+lp[0].momentum()).mass()); + _mVf [ix]->fill((vector[0].momentum()+lm[0].momentum()).mass()); + _mVfbar[ix]->fill((vector[0].momentum()+lp[0].momentum()).mass()); } } } /// Normalise histograms etc., after the run void finalize() { // normalize to unity V->P for(unsigned int ix=0;ix<_mff_V.size();++ix) { normalize(_mff_V); normalize(_mPf); normalize(_mPfbar); } // normalize to unity P->V for(unsigned int ix=0;ix<_mff_P.size();++ix) { normalize(_mff_P); normalize(_mVf); normalize(_mVfbar); } } //@} /// @name Histograms for V -> P //@{ /** * PDG codes of the incoming particles */ vector _incomingV; /** * PDG codes of the outgoing pseudoscalar mesons */ vector _outgoingP; /** * PDG codes of the outgoing fermion */ vector _outgoingf_V; /** * Histograms for the mass of the fermion-antifermion pair */ vector _mff_V; /** * Histograms for the masses of the pseudoscalar and the fermion */ vector _mPf; /** * Histograms for the masses of the pseudoscalar and the antifermion */ vector _mPfbar; //@} /// @name Histograms P->V //@{ /** * PDG codes of the incoming_P particles */ vector _incoming_P; /** * PDG codes of the outgoing vector mesons */ vector _outgoingV; /** * PDG codes of the outgoing fermion */ vector _outgoingf_P; /** * Histograms for the mass of the fermion-antifermion pair */ vector _mff_P; /** * Histograms for the masses of the vector and
the fermion */ vector _mVf; /** * Histograms for the masses of the vector and the antifermion */ vector _mVfbar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_Meson_Meson_Leptons_Decay); } diff --git a/analyses/pluginMC/MC_OmegaPhia1_3Pion_Decay.cc b/analyses/pluginMC/MC_OmegaPhia1_3Pion_Decay.cc --- a/analyses/pluginMC/MC_OmegaPhia1_3Pion_Decay.cc +++ b/analyses/pluginMC/MC_OmegaPhia1_3Pion_Decay.cc @@ -1,248 +1,253 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_OmegaPhia1_3Pion_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_OmegaPhia1_3Pion_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); // book histograms a_1 // Histograms for a_10 -> pi0pi0pi0 - _hist0 = bookHisto1D("hist0",200,0.2,1.5); + book(_hist0, "hist0",200,0.2,1.5); // // dalitz plot - _dalitz0 = bookHisto2D("dalitz0",50,0.2,1.5,50,0.2,1.5); + book(_dalitz0, "dalitz0",50,0.2,1.5,50,0.2,1.5); // Histograms for a_1+ -> pi0pi0pi+ // Mass of the pi0pi0 pair - _hist1A = bookHisto1D("hist1A",200,0.2,1.5); + book(_hist1A, "hist1A",200,0.2,1.5); // Mass of the pi0pi+ pair - _hist1B = bookHisto1D("hist1B",200,0.2,1.5); + book(_hist1B, "hist1B",200,0.2,1.5); // dalitz plot - _dalitz1 = bookHisto2D("dalitz1",50,0.2,1.5,50,0.2,1.5); + book(_dalitz1, "dalitz1",50,0.2,1.5,50,0.2,1.5); // Histograms for a_10 -> pi+pi-pi0 // Mass of the pi+pi- pair - _hist2A = bookHisto1D("hist2A",200,0.2,1.5); + book(_hist2A, "hist2A",200,0.2,1.5); // Mass of the pi+pi0 pair - _hist2B = bookHisto1D("hist2B",200,0.2,1.5); + book(_hist2B, "hist2B",200,0.2,1.5); // Mass of the pi-pi0 pair - _hist2C = bookHisto1D("hist2C",200,0.2,1.5); + book(_hist2C, "hist2C",200,0.2,1.5); // dalitz plot - _dalitz2 = 
bookHisto2D("dalitz2",50,0.2,1.5,50,0.2,1.5); + book(_dalitz2, "dalitz2",50,0.2,1.5,50,0.2,1.5); // Histograms for a_1+ -> pi+pi+pi- // Mass of the pi+pi+ pair - _hist3A = bookHisto1D("hist3A",200,0.2,1.5); + book(_hist3A, "hist3A",200,0.2,1.5); // Mass of the pi+pi- pair - _hist3B = bookHisto1D("hist3B",200,0.2,1.5); + book(_hist3B, "hist3B",200,0.2,1.5); // dalitz plot - _dalitz3 = bookHisto2D("dalitz3",50,0.2,1.5,50,0.2,1.5); + book(_dalitz3, "dalitz3",50,0.2,1.5,50,0.2,1.5); // Book histograms omega/phi for(unsigned int ix=0;ix<2;++ix) { double mmax = ix==0 ? 0.8 : 1.0; - ostringstream title1; title1 << "xhist_" << ix+1; - _h_xhist .push_back(bookHisto1D(title1.str(),200,-300.,300. )); - ostringstream title2; title2 << "yhist_" << ix+1; - _h_yhist .push_back(bookHisto1D(title2.str(),200,0. ,400. )); - ostringstream title3; title3 << "mplus_" << ix+1; - _h_mplus .push_back(bookHisto1D(title3.str(),200,200.,mmax*1000.)); - ostringstream title4; title4 << "mminus_" << ix+1; - _h_mminus .push_back(bookHisto1D(title4.str(),200,200.,mmax*1000.)); - ostringstream title5; title5 << "m0_" << ix+1; - _h_m0 .push_back(bookHisto1D(title5.str(),200,200.,mmax*1000.)); - ostringstream title6; title6 << "dalitz_" << ix+1; - _h_dalitz.push_back(bookHisto2D(title6.str(),50,0.2,mmax,50,0.2,mmax)); + std::ostringstream title1; title1 << "xhist_" << ix+1; + _h_xhist .push_back(Histo1DPtr()); + book(_h_xhist.back(), title1.str(),200,-300.,300. ); + std::ostringstream title2; title2 << "yhist_" << ix+1; + _h_yhist .push_back(Histo1DPtr()); + book(_h_yhist.back(), title2.str(),200,0. ,400. 
); + std::ostringstream title3; title3 << "mplus_" << ix+1; + _h_mplus .push_back(Histo1DPtr()); + book(_h_mplus.back(), title3.str(),200,200.,mmax*1000.); + std::ostringstream title4; title4 << "mminus_" << ix+1; + _h_mminus .push_back(Histo1DPtr()); + book(_h_mminus.back(), title4.str(),200,200.,mmax*1000.); + std::ostringstream title5; title5 << "m0_" << ix+1; + _h_m0 .push_back(Histo1DPtr()); + book(_h_m0.back(), title5.str(),200,200.,mmax*1000.); + std::ostringstream title6; title6 << "dalitz_" << ix+1; + _h_dalitz.push_back(Histo2DPtr()); + book(_h_dalitz.back(), title6.str(),50,0.2,mmax,50,0.2,mmax); } } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles & pip , Particles & pim , Particles & pi0) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::PI0) { pi0.push_back(p); ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p, nstable, pip, pim, pi0); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { - double weight = event.weight(); for(const Particle& meson : apply(event, "UFS").particles(Cuts::pid==PID::PHI || Cuts::pid==PID::OMEGA || Cuts::abspid==20213 || Cuts::pid==20113 )) { unsigned int nstable(0); Particles pip, pim, pi0; findDecayProducts(meson, nstable, pip, pim, pi0); if(nstable !=3) continue; - if(meson.pdgId()<0) { + if(meson.pid()<0) { swap(pim,pip); } - if(meson.pdgId()== PID::PHI || meson.pdgId()==PID::OMEGA) { + if(meson.pid()== PID::PHI || meson.pid()==PID::OMEGA) { if(pip.size()!=1 || pim.size()!=1 || pi0.size()!=1) continue; - unsigned int iloc = meson.pdgId() == PID::OMEGA ? 0 : 1; + unsigned int iloc = meson.pid() == PID::OMEGA ? 
0 : 1; LorentzTransform boost = LorentzTransform::mkFrameTransformFromBeta(meson.momentum().betaVec()); FourMomentum pp = boost.transform(pip[0].momentum()); FourMomentum pm = boost.transform(pim[0].momentum()); FourMomentum p0 = boost.transform(pi0[0].momentum()); double mp = (pp+p0).mass(), mm = (pm+pp).mass(); - _h_mplus [iloc]->fill(mp/MeV,weight); - _h_mminus[iloc]->fill((pm+p0).mass()/MeV,weight); - _h_m0 [iloc]->fill(mm/MeV,weight); + _h_mplus [iloc]->fill(mp/MeV); + _h_mminus[iloc]->fill((pm+p0).mass()/MeV); + _h_m0 [iloc]->fill(mm/MeV); double x = pp.t()-pm.t(); double y = p0.t()-p0.mass(); - _h_xhist[iloc]->fill(x/MeV,weight); - _h_yhist[iloc]->fill(y/MeV,weight); - _h_dalitz[iloc]->fill(mp,mm,weight); + _h_xhist[iloc]->fill(x/MeV); + _h_yhist[iloc]->fill(y/MeV); + _h_dalitz[iloc]->fill(mp,mm); } else { // a_1+ -> pi+pi+pi- if(pip.size()==2&&pim.size()==1) { - _hist3A->fill((pip[0].momentum()+pip[1].momentum()).mass(),weight); - _hist3B->fill((pip[0].momentum()+pim[0].momentum()).mass(),weight); - _hist3B->fill((pip[1].momentum()+pim[0].momentum()).mass(),weight); - _dalitz3->fill((pip[0].momentum()+pim[0].momentum()).mass(),(pip[1].momentum()+pim[0].momentum()).mass(),weight); - _dalitz3->fill((pip[1].momentum()+pim[0].momentum()).mass(),(pip[0].momentum()+pim[0].momentum()).mass(),weight); + _hist3A->fill((pip[0].momentum()+pip[1].momentum()).mass()); + _hist3B->fill((pip[0].momentum()+pim[0].momentum()).mass()); + _hist3B->fill((pip[1].momentum()+pim[0].momentum()).mass()); + _dalitz3->fill((pip[0].momentum()+pim[0].momentum()).mass(),(pip[1].momentum()+pim[0].momentum()).mass()); + _dalitz3->fill((pip[1].momentum()+pim[0].momentum()).mass(),(pip[0].momentum()+pim[0].momentum()).mass()); } // a_1+ -> pi0pi0pi+ else if(pip.size()==1&&pi0.size()==2) { - _hist1A->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _hist1B->fill((pip[0].momentum()+pi0[0].momentum()).mass(),weight); - 
_hist1B->fill((pip[0].momentum()+pi0[1].momentum()).mass(),weight); - _dalitz1->fill((pip[0].momentum()+pi0[0].momentum()).mass(),(pip[0].momentum()+pi0[1].momentum()).mass(),weight); - _dalitz1->fill((pip[0].momentum()+pi0[1].momentum()).mass(),(pip[0].momentum()+pi0[0].momentum()).mass(),weight); + _hist1A->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _hist1B->fill((pip[0].momentum()+pi0[0].momentum()).mass()); + _hist1B->fill((pip[0].momentum()+pi0[1].momentum()).mass()); + _dalitz1->fill((pip[0].momentum()+pi0[0].momentum()).mass(),(pip[0].momentum()+pi0[1].momentum()).mass()); + _dalitz1->fill((pip[0].momentum()+pi0[1].momentum()).mass(),(pip[0].momentum()+pi0[0].momentum()).mass()); } // a_10 -> pi0pi0pi0 else if(pi0.size()==3) { - _hist0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _hist0->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),weight); - _hist0->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _dalitz0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),(pi0[0].momentum()+pi0[2].momentum()).mass(),weight); - _dalitz0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),(pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _dalitz0->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),(pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _dalitz0->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _dalitz0->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _dalitz0->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[2].momentum()).mass(),weight); + _hist0->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _hist0->fill((pi0[0].momentum()+pi0[2].momentum()).mass()); + _hist0->fill((pi0[1].momentum()+pi0[2].momentum()).mass()); + _dalitz0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),(pi0[0].momentum()+pi0[2].momentum()).mass()); + 
_dalitz0->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),(pi0[1].momentum()+pi0[2].momentum()).mass()); + _dalitz0->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),(pi0[1].momentum()+pi0[2].momentum()).mass()); + _dalitz0->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[1].momentum()).mass()); + _dalitz0->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[1].momentum()).mass()); + _dalitz0->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),(pi0[0].momentum()+pi0[2].momentum()).mass()); } // a_10 -> pi+pi-pi0 else if(pi0.size()==1&&pip.size()==1&&pim.size()==1) { - _hist2A->fill((pim[0].momentum()+pip[0].momentum()).mass(),weight); - _hist2B->fill((pip[0].momentum()+pi0[0].momentum()).mass(),weight); - _hist2C->fill((pim[0].momentum()+pi0[0].momentum()).mass(),weight); - _dalitz2->fill((pim[0].momentum()+pi0[0].momentum()).mass(),(pip[0].momentum()+pi0[0].momentum()).mass(),weight); + _hist2A->fill((pim[0].momentum()+pip[0].momentum()).mass()); + _hist2B->fill((pip[0].momentum()+pi0[0].momentum()).mass()); + _hist2C->fill((pim[0].momentum()+pi0[0].momentum()).mass()); + _dalitz2->fill((pim[0].momentum()+pi0[0].momentum()).mass(),(pip[0].momentum()+pi0[0].momentum()).mass()); } } } } /// Normalise histograms etc., after the run void finalize() { // a_1 normalize(_hist0); normalize(_dalitz0); normalize(_hist1A); normalize(_hist1B); normalize(_dalitz1); normalize(_hist2A); normalize(_hist2B); normalize(_hist2C); normalize(_dalitz2); normalize(_hist3A); normalize(_hist3B); normalize(_dalitz3); // omega/phi for(unsigned int ix=0;ix<2;++ix) { normalize(_h_xhist [ix]); normalize(_h_yhist [ix]); normalize(_h_mplus [ix]); normalize(_h_mminus[ix]); normalize(_h_m0 [ix]); normalize(_h_dalitz[ix]); } } //@} /// @name Histograms a_1 //@{ // Histograms for a_10 -> pi0pi0pi0 Histo1DPtr _hist0; // dalitz plot Histo2DPtr _dalitz0; // Histograms for a_1+ -> pi0pi0pi+ // Mass of the pi0pi0 pair Histo1DPtr _hist1A; // Mass of the 
pi0pi+ pair Histo1DPtr _hist1B; // dalitz plot Histo2DPtr _dalitz1; // Histograms for a_10 -> pi+pi-pi0 // Mass of the pi+pi- pair Histo1DPtr _hist2A; // Mass of the pi+pi0 pair Histo1DPtr _hist2B; // Mass of the pi-pi0 pair Histo1DPtr _hist2C; // dalitz plot Histo2DPtr _dalitz2; // Histograms for a_1+ -> pi+pi+pi- // Mass of the pi+pi+ pair Histo1DPtr _hist3A; // Mass of the pi+pi- pair Histo1DPtr _hist3B; // dalitz plot Histo2DPtr _dalitz3; //@} /// @name Histograms omega/phi //@{ // Histogram for the x-values vector _h_xhist; // Histogram for the y-values vector _h_yhist; // The mass of the \rho^+ vector _h_mplus; // The mass of the \rho^- vector _h_mminus; // The mass of the \rho^0 vector _h_m0; // Dalitz plot vector _h_dalitz; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_OmegaPhia1_3Pion_Decay); } diff --git a/analyses/pluginMC/MC_Onium_PiPi_Decay.cc b/analyses/pluginMC/MC_Onium_PiPi_Decay.cc --- a/analyses/pluginMC/MC_Onium_PiPi_Decay.cc +++ b/analyses/pluginMC/MC_Onium_PiPi_Decay.cc @@ -1,152 +1,153 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_Onium_PiPi_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_Onium_PiPi_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(),"UFS"); } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& pip, Particles& pim, Particles& pi0, Particles & onium) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if ( id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pim.push_back(p); ++nstable; } else if (id == PID::PI0) { pi0.push_back(p); ++nstable; } else if (abs(id)%1000==443 || abs(id)%1000==553) { onium.push_back(p); ++nstable; 
} else if ( !p.children().empty() ) { findDecayProducts(p,nstable,pip,pim,pi0,onium); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { - double weight = event.weight(); // loop over unstable particles for(const Particle& vMeson : apply<UnstableParticles>(event, "UFS").particles()) { - int id = vMeson.pdgId(); + int id = vMeson.pid(); if(id%1000!=443 && id%1000!=553) continue; unsigned int nstable(0); Particles pip, pim, pi0, onium; findDecayProducts(vMeson,nstable,pip,pim,pi0,onium); // check for onium if(onium.size() !=1 || nstable !=3) continue; // check for pipi if( ! ((pip.size()==1 && pim.size() ==1) || pi0.size()==2)) continue; // check if histos already made unsigned int iloc=0; bool found(false); while(!found&&iloc<_incoming.size()) { - if(_incoming[iloc]==vMeson.pdgId()&&_outgoing[iloc]==onium[0].pdgId()) found=true; + if(_incoming[iloc]==vMeson.pid()&&_outgoing[iloc]==onium[0].pid()) found=true; else ++iloc; } // if histos not made, make them if(!found) { double twompi = 0.378; double upp = vMeson.mass()-onium[0].mass(); iloc=_incoming.size(); - _incoming.push_back(vMeson.pdgId()); - _outgoing.push_back(onium[0].pdgId()); - ostringstream title; - title << "h_" << vMeson.pdgId() << "_" << onium[0].pdgId() << "_"; - _mpipi.push_back(make_pair(bookHisto1D(title.str()+"mpippim",200,twompi/GeV,upp/GeV), - bookHisto1D(title.str()+"mpi0pi0",200,twompi/GeV,upp/GeV))); - _hel .push_back(make_pair(bookHisto1D(title.str()+"hpippim",200,-1.,1.), - bookHisto1D(title.str()+"hpi0pi0",200, 0.,1.))); + _incoming.push_back(vMeson.pid()); + _outgoing.push_back(onium[0].pid()); + std::ostringstream title; + title << "h_" << vMeson.pid() << "_" << onium[0].pid() << "_"; + _mpipi.push_back(make_pair(Histo1DPtr(), Histo1DPtr())); + book(_mpipi.back().first, title.str()+"mpippim",200,twompi/GeV,upp/GeV); + book(_mpipi.back().second, title.str()+"mpi0pi0",200,twompi/GeV,upp/GeV); + _hel.push_back(make_pair(Histo1DPtr(), Histo1DPtr())); + 
book(_hel.back().first, title.str()+"hpippim",200,-1.,1.); + book(_hel.back().second, title.str()+"hpi0pi0",200, 0.,1.); } // boost to rest frame of the pion pair FourMomentum q = vMeson.momentum()-onium[0].momentum(); LorentzTransform boost = LorentzTransform::mkFrameTransformFromBeta(q.betaVec()); FourMomentum qp = onium[0].momentum(); FourMomentum ppi= pip.size()==1 ? pip[0].momentum() : pi0[0].momentum(); qp = boost.transform(qp); ppi = boost.transform(ppi); double cX=-ppi.p3().unit().dot(qp.p3().unit()); if(pi0.size()==2) { - _mpipi[iloc].second->fill(q.mass(),weight); - _hel [iloc].second->fill(abs(cX),weight); + _mpipi[iloc].second->fill(q.mass()); + _hel [iloc].second->fill(abs(cX)); } else { - _mpipi[iloc].first->fill(q.mass(),weight); - _hel [iloc].first->fill(cX,weight); + _mpipi[iloc].first->fill(q.mass()); + _hel [iloc].first->fill(cX); } } } /// Normalise histograms etc., after the run void finalize() { // normalize to unity for(unsigned int ix=0;ix<_mpipi.size();++ix) { normalize(_mpipi[ix].first ); normalize(_mpipi[ix].second); normalize(_hel[ix].first ); normalize(_hel[ix].second); } } //@} /** * Incoming onium states */ vector _incoming; /** * Outgoing onium states */ vector _outgoing; /** * Histograms for the \f$\pi^+\pi^-\f$ masses */ vector > _mpipi; /** * Histmgrams for the helicity angles */ vector > _hel; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_Onium_PiPi_Decay); } diff --git a/analyses/pluginMC/MC_PARTONICTOPS.cc b/analyses/pluginMC/MC_PARTONICTOPS.cc --- a/analyses/pluginMC/MC_PARTONICTOPS.cc +++ b/analyses/pluginMC/MC_PARTONICTOPS.cc @@ -1,128 +1,126 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/PartonicTops.hh" namespace Rivet { /// Find and plot partonic top properties (requires tops in event record) class MC_PARTONICTOPS : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_PARTONICTOPS); /// @name Analysis methods //@{ /// Book histograms and initialise projections 
before the run void init() { // Initialise and register projections - declare(PartonicTops(PartonicTops::ALL), "AllTops"); - declare(PartonicTops(PartonicTops::ALL, true, false, Cuts::OPEN, PartonicTops::FIRST), "AllTopsFirst"); ///< @todo API ick! - declare(PartonicTops(PartonicTops::E_MU), "LeptonicTops"); - declare(PartonicTops(PartonicTops::HADRONIC), "HadronicTops"); + declare(PartonicTops(PartonicTops::DecayMode::ALL), "AllTops"); + declare(PartonicTops(PartonicTops::DecayMode::ALL, true, false, Cuts::OPEN, PartonicTops::WhichTop::FIRST), "AllTopsFirst"); ///< @todo API ick! + declare(PartonicTops(PartonicTops::DecayMode::E_MU), "LeptonicTops"); + declare(PartonicTops(PartonicTops::DecayMode::HADRONIC), "HadronicTops"); // Book histograms - _h_tall_n = bookHisto1D("t_all_n", linspace(5, -0.5, 4.5)); - _h_tall_pt = bookHisto1D("t_all_pT", logspace(50, 1, 500)); - _h_tall_y = bookHisto1D("t_all_y", linspace(50, -5, 5)); + book(_h_tall_n, "t_all_n", linspace(5, -0.5, 4.5)); + book(_h_tall_pt, "t_all_pT", logspace(50, 1, 500)); + book(_h_tall_y, "t_all_y", linspace(50, -5, 5)); - _h_tall_n_first = bookHisto1D("t_all_n_firsttop", linspace(5, -0.5, 4.5)); - _h_tall_pt_first = bookHisto1D("t_all_pT_firsttop", logspace(50, 1, 500)); - _h_tall_y_first = bookHisto1D("t_all_y_firsttop", linspace(50, -5, 5)); + book(_h_tall_n_first, "t_all_n_firsttop", linspace(5, -0.5, 4.5)); + book(_h_tall_pt_first, "t_all_pT_firsttop", logspace(50, 1, 500)); + book(_h_tall_y_first, "t_all_y_firsttop", linspace(50, -5, 5)); - _h_tall_pt_dfirstlast = bookHisto1D("t_all_pT_dfirstlast", linspace(100, -100, 100)); - _p_tall_pt_dfirstlast = bookProfile1D("t_all_pT_dfirstlast_prof", logspace(50, 1, 500)); - // _h_tall_y_dfirstlast = bookHisto1D("t_all_y_dfirstlast", linspace(50, -2, 2)); - // _p_tall_y_dfirstlast = bookProfile1D("t_all_y_dfirstlast_prof", linspace(50, -5, 5)); + book(_h_tall_pt_dfirstlast, "t_all_pT_dfirstlast", linspace(100, -100, 100)); + book(_p_tall_pt_dfirstlast, 
"t_all_pT_dfirstlast_prof", logspace(50, 1, 500)); - _h_tlep_n = bookHisto1D("t_lep_n", linspace(5, -0.5, 4.5)); - _h_tlep_pt = bookHisto1D("t_lep_pT", logspace(50, 1, 500)); - _h_tlep_y = bookHisto1D("t_lep_y", linspace(50, -5, 5)); + book(_h_tlep_n, "t_lep_n", linspace(5, -0.5, 4.5)); + book(_h_tlep_pt, "t_lep_pT", logspace(50, 1, 500)); + book(_h_tlep_y, "t_lep_y", linspace(50, -5, 5)); - _h_thad_n = bookHisto1D("t_had_n", linspace(5, -0.5, 4.5)); - _h_thad_pt = bookHisto1D("t_had_pT", logspace(50, 1, 500)); - _h_thad_y = bookHisto1D("t_had_y", linspace(50, -5, 5)); + book(_h_thad_n, "t_had_n", linspace(5, -0.5, 4.5)); + book(_h_thad_pt, "t_had_pT", logspace(50, 1, 500)); + book(_h_thad_y, "t_had_y", linspace(50, -5, 5)); } /// Perform the per-event analysis void analyze(const Event& event) { // Last tops (standard) const Particles& alltops = apply(event, "AllTops").particlesByPt(); _h_tall_n->fill(alltops.size()); for (const Particle& t : alltops) { _h_tall_pt->fill(t.pT()/GeV); _h_tall_y->fill(t.rap()); } // First tops const Particles& alltops_first = apply(event, "AllTopsFirst").particlesByPt(); _h_tall_n_first->fill(alltops_first.size()); for (const Particle& t : alltops_first) { _h_tall_pt_first->fill(t.pT()/GeV); _h_tall_y_first->fill(t.rap()); } // Match first and last tops for (const Particle& tf : alltops_first) { for (const Particle& tl : alltops) { //if (deltaR(tf, tl) > 1) continue; if (tf.pid() != tl.pid()) continue; const double dpt = tl.pT() - tf.pT(); //< defined as change due to PS _h_tall_pt_dfirstlast->fill(dpt/GeV); _p_tall_pt_dfirstlast->fill(tf.pT()/GeV, fabs(dpt)/GeV); } } // Leptonic (last) tops const Particles& leptops = apply(event, "LeptonicTops").particlesByPt(); _h_tlep_n->fill(leptops.size()); for (const Particle& t : leptops) { _h_tlep_pt->fill(t.pT()/GeV); _h_tlep_y->fill(t.rap()); } // Hadronic (last) tops const Particles& hadtops = apply(event, "HadronicTops").particlesByPt(); _h_thad_n->fill(hadtops.size()); for (const 
Particle& t : hadtops) { _h_thad_pt->fill(t.pT()/GeV); _h_thad_y->fill(t.rap()); } } /// Normalise histograms etc., after the run void finalize() { normalize({_h_tall_n, _h_tall_n_first, _h_tlep_n, _h_thad_n}); normalize({_h_tall_pt, _h_tall_pt_first, _h_tlep_pt, _h_thad_pt}); normalize({_h_tall_y, _h_tall_y_first, _h_tlep_y, _h_thad_y}); normalize(_h_tall_pt_dfirstlast); } //@} /// @name Histograms //@{ Histo1DPtr _h_tall_n, _h_tall_n_first, _h_tlep_n, _h_thad_n; Histo1DPtr _h_tall_pt, _h_tall_pt_first, _h_tlep_pt, _h_thad_pt; Histo1DPtr _h_tall_y, _h_tall_y_first, _h_tlep_y, _h_thad_y; Histo1DPtr _h_tall_pt_dfirstlast; Profile1DPtr _p_tall_pt_dfirstlast; //@} }; DECLARE_RIVET_PLUGIN(MC_PARTONICTOPS); } diff --git a/analyses/pluginMC/MC_Semi_Leptonic_Decay.cc b/analyses/pluginMC/MC_Semi_Leptonic_Decay.cc --- a/analyses/pluginMC/MC_Semi_Leptonic_Decay.cc +++ b/analyses/pluginMC/MC_Semi_Leptonic_Decay.cc @@ -1,180 +1,181 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_Semi_Leptonic_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_Semi_Leptonic_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections declare(UnstableParticles(), "UFS"); } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles& lp, Particles& lm, Particles& nu, Particles& nub, Particles& out) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if ( id == PID::EMINUS || id == PID::MUON ) { lm .push_back(p); ++nstable; } else if (id == PID::EPLUS || id == PID::ANTIMUON) { lp .push_back(p); ++nstable; } else if ( id == PID::NU_E || id == PID::NU_EBAR ) { nu .push_back(p); ++nstable; } else if (id == PID::NU_MU || id == PID::NU_MUBAR ) { nub.push_back(p); ++nstable; } else if 
(PID::isMeson(id)) { out.push_back(p); ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p,nstable,lp,lm,nu,nub,out); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { - double weight = event.weight(); // loop over unstable particles for(const Particle& meson : apply<UnstableParticles>(event, "UFS").particles()) { - int id = meson.pdgId(); + int id = meson.pid(); // spin 0 mesons if(!PID::isMeson(id)) continue; if(abs(id)%10!=1) continue; unsigned int nstable(0); Particles lp, lm, nu, nub, out; findDecayProducts(meson,nstable,lp,lm,nu,nub,out); if(nstable!=3 || out.size()!=1) continue; int ilep=0; FourMomentum plep,pmnu=out[0].momentum(); double me2(0.); if( lp.size()==1 && nu.size()==1 && out.size()==1 ) { - if(nu[0].pdgId() != -lp[0].pdgId()+1) continue; - ilep = lp[0].pdgId(); + if(nu[0].pid() != -lp[0].pid()+1) continue; + ilep = lp[0].pid(); plep = nu[0].momentum()+lp[0].momentum(); pmnu += nu[0].momentum(); me2 = lp[0].mass2(); } else if( lm.size()==1 && nub.size()==1 && out.size()==1 ) { - if(nub[0].pdgId() != -lm[0].pdgId()-1) continue; - ilep = lm[0].pdgId(); + if(nub[0].pid() != -lm[0].pid()-1) continue; + ilep = lm[0].pid(); plep = nub[0].momentum()+lm[0].momentum(); pmnu += nub[0].momentum(); me2 = lm[0].mass2(); } else continue; // check if histos already exist unsigned int iloc=0; bool found(false); while(!found&&iloc<_incoming.size()) { if(_incoming[iloc] == id && - _outgoing[iloc] == out[0].pdgId() && + _outgoing[iloc] == out[0].pid() && ilep==_outgoingL[iloc]) found=true; else ++iloc; } if(!found) { iloc=_incoming.size(); _incoming.push_back(id); - _outgoing.push_back(out[0].pdgId()); + _outgoing.push_back(out[0].pid()); _outgoingL.push_back(ilep); - ostringstream title; + std::ostringstream title; title << "h_" << abs(id); if(id>0) title << "p"; else title << "m"; - title << "_" << abs(out[0].pdgId()); - if(out[0].pdgId()>0) title << "p"; + title << "_" << abs(out[0].pid()); + if(out[0].pid()>0) title << 
"p"; else title << "m"; title << "_" << abs(ilep); if(ilep>0) title << "p"; else title << "m"; title << "_"; - _energy.push_back(bookHisto1D(title.str()+"energy", - 200,0.0,0.5*meson.mass()/MeV)); - _scale .push_back(bookHisto1D(title.str()+"scale", - 200,0.0,meson.mass()/MeV)); + _energy.push_back(Histo1DPtr()); + book(_energy.back(), title.str()+"energy", + 200,0.0,0.5*meson.mass()/MeV); + _scale .push_back(Histo1DPtr()); + book(_scale.back(), title.str()+"scale", + 200,0.0,meson.mass()/MeV); } // add the results to the histogram - _scale[iloc]->fill(plep.mass()/MeV,weight); + _scale[iloc]->fill(plep.mass()/MeV); double ee = 0.5/meson.mass()*(meson.mass2()-pmnu.mass2()+me2); - _energy[iloc]->fill(ee/MeV,weight); + _energy[iloc]->fill(ee/MeV); } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=0;ix<_energy.size();++ix) { normalize(_energy); normalize(_scale ); } } //@} /// @name Histograms //@{ /** * PDG codes of the decaying mesons */ vector _incoming; /** * PDG codes of the decay products */ vector _outgoing; /** * Identidies of the leptons */ vector _outgoingL; /** * Histograms */ //@{ /** * The lepton energy */ vector _energy; /** * The \f$q\f$ value */ vector _scale; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_Semi_Leptonic_Decay); } diff --git a/analyses/pluginMC/MC_TAU_Decay.cc b/analyses/pluginMC/MC_TAU_Decay.cc --- a/analyses/pluginMC/MC_TAU_Decay.cc +++ b/analyses/pluginMC/MC_TAU_Decay.cc @@ -1,645 +1,671 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" +#include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief Add a short analysis description here class MC_TAU_Decay : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(MC_TAU_Decay); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections 
declare(UnstableParticles(),"UFS"); // Book histograms // leptonic - _h_2B_m2enu=bookHisto1D("h_2B_m2enu", 200,0.,3.15); - _h_2B_menu =bookHisto1D("h_2B_menu" , 200,0.,1.8 ); + book(_h_2B_m2enu, "h_2B_m2enu", 200,0.,3.15); + book(_h_2B_menu, "h_2B_menu" , 200,0.,1.8 ); // hadronic // 2 hadrons - _h_2B_m2pipi=bookHisto1D("h_2B_m2pipi", 200,0.,3.15); - _h_2B_mpipi =bookHisto1D("h_2B_mpipi" , 200,0.,1.8 ); - _h_2B_m2munu=bookHisto1D("h_2B_m2munu", 200,0.,3.15); - _h_2B_mmunu =bookHisto1D("h_2B_mmunu" , 200,0.,1.8 ); - _h_2B_m2KpiA=bookHisto1D("h_2B_m2KpiA", 200,0.,3.15); - _h_2B_mKpiA =bookHisto1D("h_2B_mKpiA" , 200,0.,1.8 ); - _h_2B_m2KpiB=bookHisto1D("h_2B_m2KpiB", 200,0.,3.15); - _h_2B_mKpiB =bookHisto1D("h_2B_mKpiB" , 200,0.,1.8 ); - _h_2B_m2Keta=bookHisto1D("h_2B_m2Keta", 200,0.,3.15); - _h_2B_mKeta =bookHisto1D("h_2B_mKeta" , 200,0.,1.8 ); - _h_2B_m2KK =bookHisto1D("h_2B_m2KK" , 200,0.,3.15); - _h_2B_mKK =bookHisto1D("h_2B_mKK" , 200,0.,1.8 ); + book(_h_2B_m2pipi, "h_2B_m2pipi", 200,0.,3.15); + book(_h_2B_mpipi, "h_2B_mpipi" , 200,0.,1.8 ); + book(_h_2B_m2munu, "h_2B_m2munu", 200,0.,3.15); + book(_h_2B_mmunu, "h_2B_mmunu" , 200,0.,1.8 ); + book(_h_2B_m2KpiA, "h_2B_m2KpiA", 200,0.,3.15); + book(_h_2B_mKpiA, "h_2B_mKpiA" , 200,0.,1.8 ); + book(_h_2B_m2KpiB, "h_2B_m2KpiB", 200,0.,3.15); + book(_h_2B_mKpiB, "h_2B_mKpiB" , 200,0.,1.8 ); + book(_h_2B_m2Keta, "h_2B_m2Keta", 200,0.,3.15); + book(_h_2B_mKeta, "h_2B_mKeta" , 200,0.,1.8 ); + book(_h_2B_m2KK, "h_2B_m2KK" , 200,0.,3.15); + book(_h_2B_mKK, "h_2B_mKK" , 200,0.,1.8 ); // 3 hadrons + Histo1DPtr dummy; for(unsigned int ix=0;ix<4;++ix) { if(ix<3) { - ostringstream title1 ; title1 << "h_3B_pippimpim_" << ix+1; - _h_3B_pippimpim .push_back(bookHisto1D(title1 .str(),200,0.,1.8)); - ostringstream title2 ; title2 << "h_3B_pi0pi0pim_" << ix+1; - _h_3B_pi0pi0pim .push_back(bookHisto1D(title2 .str(),200,0.,1.8)); - ostringstream title5 ; title5 << "h_3B_pi0pi0km_" << ix+1; - _h_3B_pi0pi0km .push_back(bookHisto1D(title5 
.str(),200,0.,1.8)); - ostringstream title10; title10 << "h_3B_kspimks_" << ix+1; - _h_3B_kspimks .push_back(bookHisto1D(title10.str(),200,0.,1.8)); - ostringstream title11; title11 << "h_3B_klpimkl_" << ix+1; - _h_3B_klpimkl .push_back(bookHisto1D(title11.str(),200,0.,1.8)); + std::ostringstream title1 ; title1 << "h_3B_pippimpim_" << ix+1; + book(dummy, title1 .str(),200,0.,1.8); + _h_3B_pippimpim .push_back(dummy); + std::ostringstream title2 ; title2 << "h_3B_pi0pi0pim_" << ix+1; + book(dummy, title2 .str(),200,0.,1.8); + _h_3B_pi0pi0pim .push_back(dummy); + std::ostringstream title5 ; title5 << "h_3B_pi0pi0km_" << ix+1; + book(dummy, title5 .str(),200,0.,1.8); + _h_3B_pi0pi0km .push_back(dummy); + std::ostringstream title10; title10 << "h_3B_kspimks_" << ix+1; + book(dummy, title10.str(),200,0.,1.8); + _h_3B_kspimks .push_back(dummy); + std::ostringstream title11; title11 << "h_3B_klpimkl_" << ix+1; + book(dummy, title11.str(),200,0.,1.8); + _h_3B_klpimkl .push_back(dummy); } - ostringstream title3 ; title3 << "h_3B_kmpimkp_" << ix+1; - _h_3B_kmpimkp .push_back(bookHisto1D(title3 .str(),200,0.,1.8)); - ostringstream title4 ; title4 << "h_3B_kmpi0k0_" << ix+1; - _h_3B_kmpi0k0 .push_back(bookHisto1D(title4 .str(),200,0.,1.8)); - ostringstream title6 ; title6 << "h_3B_kmpimpip_" << ix+1; - _h_3B_kmpimpip .push_back(bookHisto1D(title6 .str(),200,0.,1.8)); - ostringstream title7 ; title7 << "h_3B_pimk0pi0_" << ix+1; - _h_3B_pimk0pi0 .push_back(bookHisto1D(title7 .str(),200,0.,1.8)); - ostringstream title8 ; title8 << "h_3B_pimpi0eta_" << ix+1; - _h_3B_pimpi0eta .push_back(bookHisto1D(title8 .str(),200,0.,1.8)); - ostringstream title9 ; title9 << "h_3B_pimpi0gamma_" << ix+1; - _h_3B_pimpi0gamma .push_back(bookHisto1D(title9 .str(),200,0.,1.8)); - ostringstream title12; title12 << "h_3B_kspimkl_" << ix+1; - _h_3B_kspimkl .push_back(bookHisto1D(title12.str(),200,0.,1.8)); + std::ostringstream title3 ; title3 << "h_3B_kmpimkp_" << ix+1; + book(dummy, 
title3.str(),200,0.,1.8); + _h_3B_kmpimkp .push_back(dummy); + std::ostringstream title4 ; title4 << "h_3B_kmpi0k0_" << ix+1; + book(dummy, title4.str(),200,0.,1.8); + _h_3B_kmpi0k0 .push_back(dummy); + std::ostringstream title6 ; title6 << "h_3B_kmpimpip_" << ix+1; + book(dummy, title6.str(),200,0.,1.8); + _h_3B_kmpimpip .push_back(dummy); + std::ostringstream title7 ; title7 << "h_3B_pimk0pi0_" << ix+1; + book(dummy, title7.str(),200,0.,1.8); + _h_3B_pimk0pi0 .push_back(dummy); + std::ostringstream title8 ; title8 << "h_3B_pimpi0eta_" << ix+1; + book(dummy, title8.str(),200,0.,1.8); + _h_3B_pimpi0eta .push_back(dummy); + std::ostringstream title9 ; title9 << "h_3B_pimpi0gamma_" << ix+1; + book(dummy, title9.str(),200,0.,1.8); + _h_3B_pimpi0gamma .push_back(dummy); + std::ostringstream title12; title12 << "h_3B_kspimkl_" << ix+1; + book(dummy, title12.str(),200,0.,1.8); + _h_3B_kspimkl .push_back(dummy); } // 4 pion decays for(unsigned int ix=0;ix<5;++ix) { - ostringstream title1 ; title1 << "h_4B_pipi_" << ix+1; - _h_4B_pipi .push_back(bookHisto1D(title1.str(),200,0.,1.8)); - ostringstream title2 ; title2 << "h_4B_pipipi_" << ix+1; - _h_4B_pipipi.push_back(bookHisto1D(title2.str(),200,0.,1.8)); + std::ostringstream title1 ; title1 << "h_4B_pipi_" << ix+1; + book(dummy, title1.str(),200,0.,1.8); + _h_4B_pipi .push_back(dummy); + std::ostringstream title2 ; title2 << "h_4B_pipipi_" << ix+1; + book(dummy, title2.str(),200,0.,1.8); + _h_4B_pipipi.push_back(dummy); } - _h_4B_pipi .push_back(bookHisto1D("h_4B_pipi_6",200,0.,1.8)); + book(dummy, "h_4B_pipi_6",200,0.,1.8); + _h_4B_pipi .push_back(dummy); for(unsigned int ix=0;ix<2;++ix) { - ostringstream title ; title << "h_4B_pipipipi_" << ix+1; - _h_4B_pipipipi.push_back(bookHisto1D(title.str(),200,0.,1.8)); + std::ostringstream title ; title << "h_4B_pipipipi_" << ix+1; + book(dummy, title.str(),200,0.,1.8); + _h_4B_pipipipi.push_back(dummy); } // 5 pion decays // 2 pi0 2pi- pi+ - _h_5B_q1 = 
bookHisto1D("h_5B_q1",200,0.,1.8); + book(_h_5B_q1, "h_5B_q1",200,0.,1.8); for(unsigned int ix=0;ix<5;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipi1_" << ix+1; - _h_5B_pipi1.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipi1.push_back(dummy); } for(unsigned int ix=0;ix<5;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipi1_" << ix+1; - _h_5B_pipipi1.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipi1.push_back(dummy); } for(unsigned int ix=0;ix<3;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipipi1_" << ix+1; - _h_5B_pipipipi1.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipipi1.push_back(dummy); } // 4 pi0 pi- - _h_5B_q2 = bookHisto1D("h_5B_q2",200,0.,1.8); + book(_h_5B_q2, "h_5B_q2",200,0.,1.8); for(unsigned int ix=0;ix<2;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipi2_" << ix+1; - _h_5B_pipi2.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipi2.push_back(dummy); } for(unsigned int ix=0;ix<2;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipi2_" << ix+1; - _h_5B_pipipi2.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipi2.push_back(dummy); } for(unsigned int ix=0;ix<2;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipipi2_" << ix+1; - _h_5B_pipipipi2.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipipi2.push_back(dummy); } // 3 pi- 2 pi+ - _h_5B_q3 = bookHisto1D("h_5B_q3",200,0.,1.8); + book(_h_5B_q3, "h_5B_q3",200,0.,1.8); for(unsigned int ix=0;ix<3;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipi3_" << ix+1; - 
_h_5B_pipi3.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipi3.push_back(dummy); } for(unsigned int ix=0;ix<3;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipi3_" << ix+1; - _h_5B_pipipi3.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipi3.push_back(dummy); } for(unsigned int ix=0;ix<2;++ix) { - ostringstream title; + std::ostringstream title; title << "h_5B_pipipipi3_" << ix+1; - _h_5B_pipipipi3.push_back(bookHisto1D(title.str(), 200,0.,1.8 )); + book(dummy, title.str(),200,0.,1.8); + _h_5B_pipipipi3.push_back(dummy); } } void findDecayProducts(const Particle & mother, unsigned int & nstable, Particles & ep , Particles & em , Particles & nu_e , Particles & nu_ebar, Particles & mup , Particles & mum , Particles & nu_mu, Particles & nu_mubar, Particles & pip , Particles & pim , Particles & pi0 , Particles & Kp , Particles & Km , Particles & K0S , Particles & K0L, Particles & eta , Particles & gamma) { for(const Particle & p : mother.children()) { - int id = p.pdgId(); + int id = p.pid(); if ( id == PID::KPLUS ) { Kp.push_back(p); ++nstable; } else if (id == PID::KMINUS ) { Km.push_back(p); ++nstable; } else if (id == PID::PIPLUS) { pip.push_back(p); ++nstable; } else if (id == PID::PIMINUS) { pim.push_back(p); ++nstable; } else if (id == PID::EPLUS) { ep.push_back(p); ++nstable; } else if (id == PID::EMINUS) { em.push_back(p); ++nstable; } else if (id == PID::NU_E) { nu_e.push_back(p); ++nstable; } else if (id == PID::NU_EBAR) { nu_ebar.push_back(p); ++nstable; } else if (id == PID::NU_MU) { nu_mu.push_back(p); ++nstable; } else if (id == PID::NU_MUBAR) { nu_mubar.push_back(p); ++nstable; } else if (id == PID::ANTIMUON) { mup.push_back(p); ++nstable; } else if (id == PID::MUON) { mum.push_back(p); ++nstable; } else if (id == PID::PI0) { pi0.push_back(p); ++nstable; } else if (id == PID::K0S) { K0S.push_back(p); ++nstable; } else if 
(id == PID::K0L) { K0L.push_back(p); ++nstable; } else if (id == PID::ETA) { eta.push_back(p); ++nstable; } else if (id == PID::PHOTON) { gamma.push_back(p); ++nstable; } else if ( !p.children().empty() ) { findDecayProducts(p, nstable,ep,em,nu_e,nu_ebar,mup,mum,nu_mu,nu_mubar, pip, pim, pi0,Kp , Km, K0S, K0L,eta,gamma); } else ++nstable; } } /// Perform the per-event analysis void analyze(const Event& event) { - double weight = event.weight(); for(const Particle& tau : apply(event, "UFS").particles(Cuts::abspid==PID::TAU)) { unsigned int nstable(0); Particles ep,em,nu_e,nu_ebar,mup,mum,nu_mu,nu_mubar; Particles pip, pim, pi0, Kp , Km, K0S, K0L, eta,gamma; findDecayProducts(tau, nstable,ep,em,nu_e,nu_ebar,mup,mum,nu_mu,nu_mubar, pip, pim, pi0,Kp , Km, K0S, K0L,eta,gamma); - if(tau.pdgId()<0) { + if(tau.pid()<0) { swap(pim,pip); swap(Kp,Km); swap(em,ep); swap(mum,mup); swap(nu_e ,nu_ebar ); swap(nu_mu,nu_mubar); } // cerr << "testing before loop " << nstable << " " // << pip.size() << " " << pim.size() << " " << pi0.size() << " " // << Kp.size() << " " << Km.size() << " " << K0S.size() << " " << K0L.size() << "\n"; // 2 hadrons if(nstable==3 ) { if(em.size()==1 && nu_ebar.size()==1) { FourMomentum ptot = em[0].momentum()+nu_ebar[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2enu->fill( mass2 ,weight); - _h_2B_menu ->fill(sqrt(mass2),weight); + _h_2B_m2enu->fill( mass2 ); + _h_2B_menu ->fill(sqrt(mass2)); } else if(mum.size()==1 && nu_mubar.size()==1) { FourMomentum ptot = mum[0].momentum()+nu_mubar[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2munu->fill( mass2 ,weight); - _h_2B_mmunu ->fill(sqrt(mass2),weight); + _h_2B_m2munu->fill( mass2 ); + _h_2B_mmunu ->fill(sqrt(mass2)); } else if(pim.size()==1 && pi0.size()==1) { FourMomentum ptot = pim[0].momentum()+pi0[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2pipi->fill( mass2 ,weight); - _h_2B_mpipi ->fill(sqrt(mass2),weight); + _h_2B_m2pipi->fill( mass2 ); + _h_2B_mpipi ->fill(sqrt(mass2)); } 
else if(Km.size()==1 && pi0.size()==1) { FourMomentum ptot = Km[0].momentum()+pi0[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2KpiA->fill( mass2 ,weight); - _h_2B_mKpiA ->fill(sqrt(mass2),weight); + _h_2B_m2KpiA->fill( mass2 ); + _h_2B_mKpiA ->fill(sqrt(mass2)); } else if(K0S.size()==1&&pim.size()==1) { FourMomentum ptot = K0S[0].momentum()+pim[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2KpiB->fill( mass2 ,weight); - _h_2B_mKpiB ->fill(sqrt(mass2),weight); + _h_2B_m2KpiB->fill( mass2 ); + _h_2B_mKpiB ->fill(sqrt(mass2)); } else if(K0L.size()==1&&pim.size()==1) { FourMomentum ptot = K0L[0].momentum()+pim[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2KpiB->fill( mass2 ,weight); - _h_2B_mKpiB ->fill(sqrt(mass2),weight); + _h_2B_m2KpiB->fill( mass2 ); + _h_2B_mKpiB ->fill(sqrt(mass2)); } else if(K0S.size()==1&&Km.size()==1) { FourMomentum ptot = K0S[0].momentum()+Km[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2KK->fill( mass2 ,weight); - _h_2B_mKK ->fill(sqrt(mass2),weight); + _h_2B_m2KK->fill( mass2 ); + _h_2B_mKK ->fill(sqrt(mass2)); } else if(K0L.size()==1&&Km.size()==1) { FourMomentum ptot = K0L[0].momentum()+Km[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2KK->fill( mass2 ,weight); - _h_2B_mKK ->fill(sqrt(mass2),weight); + _h_2B_m2KK->fill( mass2 ); + _h_2B_mKK ->fill(sqrt(mass2)); } else if(eta.size()==1&&Km.size()==1) { FourMomentum ptot = eta[0].momentum()+Km[0].momentum(); double mass2 = ptot.mass2(); - _h_2B_m2Keta->fill( mass2 ,weight); - _h_2B_mKeta ->fill(sqrt(mass2),weight); + _h_2B_m2Keta->fill( mass2 ); + _h_2B_mKeta ->fill(sqrt(mass2)); } } else if(nstable==4) { if(pim.size()==2&&pip.size()==1) { - _h_3B_pippimpim[0]->fill((pim[0].momentum()+pim[1].momentum()+pip[0].momentum()).mass(),weight); - _h_3B_pippimpim[1]->fill((pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_3B_pippimpim[2]->fill((pim[0].momentum()+pip[0].momentum()).mass(),weight); - 
_h_3B_pippimpim[2]->fill((pim[1].momentum()+pip[0].momentum()).mass(),weight); + _h_3B_pippimpim[0]->fill((pim[0].momentum()+pim[1].momentum()+pip[0].momentum()).mass()); + _h_3B_pippimpim[1]->fill((pim[0].momentum()+pim[1].momentum()).mass()); + _h_3B_pippimpim[2]->fill((pim[0].momentum()+pip[0].momentum()).mass()); + _h_3B_pippimpim[2]->fill((pim[1].momentum()+pip[0].momentum()).mass()); } else if(pim.size()==1&&pi0.size()==2) { - _h_3B_pi0pi0pim[0]->fill((pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()).mass(),weight); - _h_3B_pi0pi0pim[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_3B_pi0pi0pim[2]->fill((pi0[0].momentum()+pim[0].momentum()).mass(),weight); - _h_3B_pi0pi0pim[2]->fill((pi0[1].momentum()+pim[0].momentum()).mass(),weight); + _h_3B_pi0pi0pim[0]->fill((pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()).mass()); + _h_3B_pi0pi0pim[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _h_3B_pi0pi0pim[2]->fill((pi0[0].momentum()+pim[0].momentum()).mass()); + _h_3B_pi0pi0pim[2]->fill((pi0[1].momentum()+pim[0].momentum()).mass()); } else if(Km.size()==1&&Kp.size()==1&&pim.size()==1) { - _h_3B_kmpimkp[0]->fill((Km[0].momentum()+pim[0].momentum()+Kp[0].momentum()).mass(),weight); - _h_3B_kmpimkp[1]->fill((Km[0].momentum()+pim[0].momentum()).mass(),weight); - _h_3B_kmpimkp[2]->fill((Km[0].momentum()+ Kp[0].momentum()).mass(),weight); - _h_3B_kmpimkp[3]->fill((Kp[0].momentum()+pim[0].momentum()).mass(),weight); + _h_3B_kmpimkp[0]->fill((Km[0].momentum()+pim[0].momentum()+Kp[0].momentum()).mass()); + _h_3B_kmpimkp[1]->fill((Km[0].momentum()+pim[0].momentum()).mass()); + _h_3B_kmpimkp[2]->fill((Km[0].momentum()+ Kp[0].momentum()).mass()); + _h_3B_kmpimkp[3]->fill((Kp[0].momentum()+pim[0].momentum()).mass()); } else if((K0S.size()==1||K0L.size()==1)&&Km.size()==1&&pi0.size()==1) { FourMomentum pk = K0L.size()==1 ? 
K0L[0].momentum() : K0S[0].momentum(); - _h_3B_kmpi0k0[0]->fill((Km[0].momentum()+pi0[0].momentum()+pk).mass(),weight); - _h_3B_kmpi0k0[1]->fill((Km[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_3B_kmpi0k0[2]->fill((Km[0].momentum()+pk ).mass(),weight); - _h_3B_kmpi0k0[3]->fill((pk+pi0[0].momentum()).mass(),weight); + _h_3B_kmpi0k0[0]->fill((Km[0].momentum()+pi0[0].momentum()+pk).mass()); + _h_3B_kmpi0k0[1]->fill((Km[0].momentum()+pi0[0].momentum()).mass()); + _h_3B_kmpi0k0[2]->fill((Km[0].momentum()+pk ).mass()); + _h_3B_kmpi0k0[3]->fill((pk+pi0[0].momentum()).mass()); } else if(pi0.size()==2&&Km.size()==1) { - _h_3B_pi0pi0km[0]->fill((pi0[0].momentum()+pi0[1].momentum()+Km[0].momentum()).mass(),weight); - _h_3B_pi0pi0km[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_3B_pi0pi0km[2]->fill((pi0[0].momentum()+Km[0].momentum() ).mass(),weight); - _h_3B_pi0pi0km[2]->fill((pi0[1].momentum()+Km[0].momentum() ).mass(),weight); + _h_3B_pi0pi0km[0]->fill((pi0[0].momentum()+pi0[1].momentum()+Km[0].momentum()).mass()); + _h_3B_pi0pi0km[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _h_3B_pi0pi0km[2]->fill((pi0[0].momentum()+Km[0].momentum() ).mass()); + _h_3B_pi0pi0km[2]->fill((pi0[1].momentum()+Km[0].momentum() ).mass()); } else if(Km.size()==1&&pim.size()==1&&pip.size()==1) { - _h_3B_kmpimpip[0]->fill((pip[0].momentum()+pim[0].momentum()+Km[0].momentum()).mass(),weight); - _h_3B_kmpimpip[1]->fill((Km[0].momentum()+pim[0].momentum()).mass(),weight); - _h_3B_kmpimpip[2]->fill((Km[0].momentum()+pip[0].momentum() ).mass(),weight); - _h_3B_kmpimpip[3]->fill((pip[0].momentum()+pim[0].momentum() ).mass(),weight); + _h_3B_kmpimpip[0]->fill((pip[0].momentum()+pim[0].momentum()+Km[0].momentum()).mass()); + _h_3B_kmpimpip[1]->fill((Km[0].momentum()+pim[0].momentum()).mass()); + _h_3B_kmpimpip[2]->fill((Km[0].momentum()+pip[0].momentum() ).mass()); + _h_3B_kmpimpip[3]->fill((pip[0].momentum()+pim[0].momentum() ).mass()); } else 
if(pim.size()==1&&(K0S.size()==1||K0L.size()==1)&&pi0.size()==1) { FourMomentum pk = K0L.size()==1 ? K0L[0].momentum() : K0S[0].momentum(); - _h_3B_pimk0pi0[0]->fill((pim[0].momentum()+pi0[0].momentum()+pk).mass(),weight); - _h_3B_pimk0pi0[1]->fill((pim[0].momentum()+pk).mass(),weight); - _h_3B_pimk0pi0[2]->fill((pim[0].momentum()+pi0[0].momentum() ).mass(),weight); - _h_3B_pimk0pi0[3]->fill((pk+pi0[0].momentum()).mass(),weight); + _h_3B_pimk0pi0[0]->fill((pim[0].momentum()+pi0[0].momentum()+pk).mass()); + _h_3B_pimk0pi0[1]->fill((pim[0].momentum()+pk).mass()); + _h_3B_pimk0pi0[2]->fill((pim[0].momentum()+pi0[0].momentum() ).mass()); + _h_3B_pimk0pi0[3]->fill((pk+pi0[0].momentum()).mass()); } else if(pim.size()==1&&pi0.size()==1&&eta.size()==1) { - _h_3B_pimpi0eta[0]->fill((pim[0].momentum()+pi0[0].momentum()+eta[0].momentum()).mass(),weight); - _h_3B_pimpi0eta[1]->fill((pim[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_3B_pimpi0eta[2]->fill((pim[0].momentum()+eta[0].momentum()).mass(),weight); - _h_3B_pimpi0eta[3]->fill((pi0[0].momentum()+eta[0].momentum()).mass(),weight); + _h_3B_pimpi0eta[0]->fill((pim[0].momentum()+pi0[0].momentum()+eta[0].momentum()).mass()); + _h_3B_pimpi0eta[1]->fill((pim[0].momentum()+pi0[0].momentum()).mass()); + _h_3B_pimpi0eta[2]->fill((pim[0].momentum()+eta[0].momentum()).mass()); + _h_3B_pimpi0eta[3]->fill((pi0[0].momentum()+eta[0].momentum()).mass()); } else if(pim.size()==1&&pi0.size()==1&&gamma.size()==1) { - _h_3B_pimpi0gamma[0]->fill((pim[0].momentum()+pi0[0].momentum()+gamma[0].momentum()).mass(),weight); - _h_3B_pimpi0gamma[1]->fill((pim[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_3B_pimpi0gamma[2]->fill((pim[0].momentum()+gamma[0].momentum()).mass(),weight); - _h_3B_pimpi0gamma[3]->fill((pi0[0].momentum()+gamma[0].momentum()).mass(),weight); + _h_3B_pimpi0gamma[0]->fill((pim[0].momentum()+pi0[0].momentum()+gamma[0].momentum()).mass()); + _h_3B_pimpi0gamma[1]->fill((pim[0].momentum()+pi0[0].momentum()).mass()); 
+ _h_3B_pimpi0gamma[2]->fill((pim[0].momentum()+gamma[0].momentum()).mass()); + _h_3B_pimpi0gamma[3]->fill((pi0[0].momentum()+gamma[0].momentum()).mass()); } else if(K0S.size()==2&&pim.size()==1) { - _h_3B_kspimks[0]->fill((pim[0].momentum()+K0S[0].momentum()+K0S[1].momentum()).mass(),weight); - _h_3B_kspimks[1]->fill((pim[0].momentum()+K0S[0].momentum()).mass(),weight); - _h_3B_kspimks[1]->fill((pim[0].momentum()+K0S[1].momentum()).mass(),weight); - _h_3B_kspimks[2]->fill((K0S [0].momentum()+K0S[1].momentum()).mass(),weight); + _h_3B_kspimks[0]->fill((pim[0].momentum()+K0S[0].momentum()+K0S[1].momentum()).mass()); + _h_3B_kspimks[1]->fill((pim[0].momentum()+K0S[0].momentum()).mass()); + _h_3B_kspimks[1]->fill((pim[0].momentum()+K0S[1].momentum()).mass()); + _h_3B_kspimks[2]->fill((K0S [0].momentum()+K0S[1].momentum()).mass()); } else if(K0L.size()==2&&pim.size()==1) { - _h_3B_klpimkl[0]->fill((pim[0].momentum()+K0L[0].momentum()+K0L[1].momentum()).mass(),weight); - _h_3B_klpimkl[1]->fill((pim[0].momentum()+K0L[0].momentum()).mass(),weight); - _h_3B_klpimkl[1]->fill((pim[0].momentum()+K0L[1].momentum()).mass(),weight); - _h_3B_klpimkl[2]->fill((K0L [0].momentum()+K0L[1].momentum()).mass(),weight); + _h_3B_klpimkl[0]->fill((pim[0].momentum()+K0L[0].momentum()+K0L[1].momentum()).mass()); + _h_3B_klpimkl[1]->fill((pim[0].momentum()+K0L[0].momentum()).mass()); + _h_3B_klpimkl[1]->fill((pim[0].momentum()+K0L[1].momentum()).mass()); + _h_3B_klpimkl[2]->fill((K0L [0].momentum()+K0L[1].momentum()).mass()); } else if(K0S.size()==1&&K0L.size()==1&&pim.size()==1) { - _h_3B_kspimkl[0]->fill((pim[0].momentum()+K0S[0].momentum()+K0L[0].momentum()).mass(),weight); - _h_3B_kspimkl[1]->fill((pim[0].momentum()+K0S[0].momentum()).mass(),weight); - _h_3B_kspimkl[2]->fill((K0S[0].momentum() +K0L[0].momentum()).mass(),weight); - _h_3B_kspimkl[3]->fill((pim[0].momentum()+K0L[0].momentum()).mass(),weight); + 
_h_3B_kspimkl[0]->fill((pim[0].momentum()+K0S[0].momentum()+K0L[0].momentum()).mass()); + _h_3B_kspimkl[1]->fill((pim[0].momentum()+K0S[0].momentum()).mass()); + _h_3B_kspimkl[2]->fill((K0S[0].momentum() +K0L[0].momentum()).mass()); + _h_3B_kspimkl[3]->fill((pim[0].momentum()+K0L[0].momentum()).mass()); } } else if(nstable==5) { if(pi0.size()==3&&pim.size()==1) { - _h_4B_pipi[0] ->fill( (pi0[0].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipi[0] ->fill( (pi0[1].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipi[0] ->fill( (pi0[2].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipi[1] ->fill( (pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_4B_pipi[1] ->fill( (pi0[0].momentum()+pi0[2].momentum()).mass(),weight); - _h_4B_pipi[1] ->fill( (pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _h_4B_pipipi[0] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _h_4B_pipipi[1] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipipi[1] ->fill( (pi0[0].momentum()+pi0[2].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipipi[1] ->fill( (pi0[1].momentum()+pi0[2].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipipipi[0] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pi0[2].momentum()+pim[0].momentum()).mass(),weight); + _h_4B_pipi[0] ->fill( (pi0[0].momentum()+pim[0].momentum()).mass()); + _h_4B_pipi[0] ->fill( (pi0[1].momentum()+pim[0].momentum()).mass()); + _h_4B_pipi[0] ->fill( (pi0[2].momentum()+pim[0].momentum()).mass()); + _h_4B_pipi[1] ->fill( (pi0[0].momentum()+pi0[1].momentum()).mass()); + _h_4B_pipi[1] ->fill( (pi0[0].momentum()+pi0[2].momentum()).mass()); + _h_4B_pipi[1] ->fill( (pi0[1].momentum()+pi0[2].momentum()).mass()); + _h_4B_pipipi[0] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pi0[2].momentum()).mass()); + _h_4B_pipipi[1] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pim[0].momentum()).mass()); + _h_4B_pipipi[1] ->fill( 
(pi0[0].momentum()+pi0[2].momentum()+pim[0].momentum()).mass()); + _h_4B_pipipi[1] ->fill( (pi0[1].momentum()+pi0[2].momentum()+pim[0].momentum()).mass()); + _h_4B_pipipipi[0] ->fill( (pi0[0].momentum()+pi0[1].momentum()+pi0[2].momentum()+pim[0].momentum()).mass()); } else if(pi0.size()==1&&pip.size()==1&&pim.size()==2) { - _h_4B_pipi[2] ->fill((pi0[0].momentum()+pip[0].momentum()).mass(),weight); - _h_4B_pipi[3] ->fill((pi0[0].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipi[3] ->fill((pi0[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipi[4] ->fill((pip[0].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipi[4] ->fill((pip[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipi[5] ->fill((pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipipi[2] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[0].momentum()).mass(),weight); - _h_4B_pipipi[2] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipipi[3] ->fill( (pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipipi[4] ->fill( (pi0[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_4B_pipipipi[1] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(),weight); + _h_4B_pipi[2] ->fill((pi0[0].momentum()+pip[0].momentum()).mass()); + _h_4B_pipi[3] ->fill((pi0[0].momentum()+pim[0].momentum()).mass()); + _h_4B_pipi[3] ->fill((pi0[0].momentum()+pim[1].momentum()).mass()); + _h_4B_pipi[4] ->fill((pip[0].momentum()+pim[0].momentum()).mass()); + _h_4B_pipi[4] ->fill((pip[0].momentum()+pim[1].momentum()).mass()); + _h_4B_pipi[5] ->fill((pim[0].momentum()+pim[1].momentum()).mass()); + _h_4B_pipipi[2] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[0].momentum()).mass()); + _h_4B_pipipi[2] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[1].momentum()).mass()); + _h_4B_pipipi[3] ->fill( (pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass()); + 
_h_4B_pipipi[4] ->fill( (pi0[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass()); + _h_4B_pipipipi[1] ->fill( (pi0[0].momentum()+pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass()); } } else if(nstable==6) { // 2 pi0 2pi- pi+ if(pi0.size()==2&&pim.size()==2&&pip.size()==1) { FourMomentum ptotal = pim[0].momentum()+pim[1].momentum()+ pip[0].momentum()+pi0[0].momentum()+pi0[1].momentum(); - _h_5B_pipi1[0]->fill((pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_5B_pipi1[1]->fill((pim[0].momentum()+pip[0].momentum()).mass(),weight); - _h_5B_pipi1[1]->fill((pim[1].momentum()+pip[0].momentum()).mass(),weight); - _h_5B_pipi1[2]->fill((pim[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_5B_pipi1[2]->fill((pim[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipi1[2]->fill((pim[1].momentum()+pi0[0].momentum()).mass(),weight); - _h_5B_pipi1[2]->fill((pim[1].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipi1[3]->fill((pip[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_5B_pipi1[3]->fill((pip[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipi1[4]->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipipi1[0]->fill((pim[0].momentum()+pim[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[1]->fill((pim[0].momentum()+pip[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[1]->fill((pim[1].momentum()+pip[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[2]->fill((pim[0].momentum()+pi0[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[2]->fill((pim[0].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[2]->fill((pim[1].momentum()+pi0[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[2]->fill((pim[1].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[3]->fill((pip[0].momentum()+pi0[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi1[3]->fill((pip[0].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - 
_h_5B_pipipi1[4]->fill((pi0[0].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipipi1[0]->fill((ptotal-pim[0].momentum()).mass(),weight); - _h_5B_pipipipi1[0]->fill((ptotal-pim[1].momentum()).mass(),weight); - _h_5B_pipipipi1[1]->fill((ptotal-pip[0].momentum()).mass(),weight); - _h_5B_pipipipi1[2]->fill((ptotal-pi0[0].momentum()).mass(),weight); - _h_5B_pipipipi1[2]->fill((ptotal-pi0[1].momentum()).mass(),weight); - _h_5B_q1->fill(ptotal.mass(),weight); + _h_5B_pipi1[0]->fill((pim[0].momentum()+pim[1].momentum()).mass()); + _h_5B_pipi1[1]->fill((pim[0].momentum()+pip[0].momentum()).mass()); + _h_5B_pipi1[1]->fill((pim[1].momentum()+pip[0].momentum()).mass()); + _h_5B_pipi1[2]->fill((pim[0].momentum()+pi0[0].momentum()).mass()); + _h_5B_pipi1[2]->fill((pim[0].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipi1[2]->fill((pim[1].momentum()+pi0[0].momentum()).mass()); + _h_5B_pipi1[2]->fill((pim[1].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipi1[3]->fill((pip[0].momentum()+pi0[0].momentum()).mass()); + _h_5B_pipi1[3]->fill((pip[0].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipi1[4]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipipi1[0]->fill((pim[0].momentum()+pim[1].momentum()-ptotal).mass()); + _h_5B_pipipi1[1]->fill((pim[0].momentum()+pip[0].momentum()-ptotal).mass()); + _h_5B_pipipi1[1]->fill((pim[1].momentum()+pip[0].momentum()-ptotal).mass()); + _h_5B_pipipi1[2]->fill((pim[0].momentum()+pi0[0].momentum()-ptotal).mass()); + _h_5B_pipipi1[2]->fill((pim[0].momentum()+pi0[1].momentum()-ptotal).mass()); + _h_5B_pipipi1[2]->fill((pim[1].momentum()+pi0[0].momentum()-ptotal).mass()); + _h_5B_pipipi1[2]->fill((pim[1].momentum()+pi0[1].momentum()-ptotal).mass()); + _h_5B_pipipi1[3]->fill((pip[0].momentum()+pi0[0].momentum()-ptotal).mass()); + _h_5B_pipipi1[3]->fill((pip[0].momentum()+pi0[1].momentum()-ptotal).mass()); + _h_5B_pipipi1[4]->fill((pi0[0].momentum()+pi0[1].momentum()-ptotal).mass()); + 
_h_5B_pipipipi1[0]->fill((ptotal-pim[0].momentum()).mass()); + _h_5B_pipipipi1[0]->fill((ptotal-pim[1].momentum()).mass()); + _h_5B_pipipipi1[1]->fill((ptotal-pip[0].momentum()).mass()); + _h_5B_pipipipi1[2]->fill((ptotal-pi0[0].momentum()).mass()); + _h_5B_pipipipi1[2]->fill((ptotal-pi0[1].momentum()).mass()); + _h_5B_q1->fill(ptotal.mass()); } // 4 pi0 pi- else if(pi0.size()==4&&pim.size()==1) { FourMomentum ptotal = pi0[0].momentum()+pi0[1].momentum()+pi0[2].momentum()+ pi0[3].momentum()+pim[0].momentum(); - _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[0].momentum()).mass(),weight); - _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[2].momentum()).mass(),weight); - _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[3].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[2].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[3].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[1].momentum()+pi0[2].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[1].momentum()+pi0[3].momentum()).mass(),weight); - _h_5B_pipi2[1]->fill((pi0[2].momentum()+pi0[3].momentum()).mass(),weight); - _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[2].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[3].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[2].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[3].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[1]->fill((pi0[1].momentum()+pi0[2].momentum()-ptotal).mass(),weight); - 
_h_5B_pipipi2[1]->fill((pi0[1].momentum()+pi0[3].momentum()-ptotal).mass(),weight); - _h_5B_pipipi2[1]->fill((pi0[2].momentum()+pi0[3].momentum()-ptotal).mass(),weight); - _h_5B_pipipipi2[0]->fill((ptotal-pim[0].momentum()).mass(),weight); - _h_5B_pipipipi2[1]->fill((ptotal-pi0[0].momentum()).mass(),weight); - _h_5B_pipipipi2[1]->fill((ptotal-pi0[1].momentum()).mass(),weight); - _h_5B_pipipipi2[1]->fill((ptotal-pi0[2].momentum()).mass(),weight); - _h_5B_pipipipi2[1]->fill((ptotal-pi0[3].momentum()).mass(),weight); - _h_5B_q2->fill(ptotal.mass(),weight); + _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[0].momentum()).mass()); + _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[2].momentum()).mass()); + _h_5B_pipi2[0]->fill((pim[0].momentum()+pi0[3].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[1].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[2].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[0].momentum()+pi0[3].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[1].momentum()+pi0[2].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[1].momentum()+pi0[3].momentum()).mass()); + _h_5B_pipi2[1]->fill((pi0[2].momentum()+pi0[3].momentum()).mass()); + _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[0].momentum()-ptotal).mass()); + _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[1].momentum()-ptotal).mass()); + _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[2].momentum()-ptotal).mass()); + _h_5B_pipipi2[0]->fill((pim[0].momentum()+pi0[3].momentum()-ptotal).mass()); + _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[1].momentum()-ptotal).mass()); + _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[2].momentum()-ptotal).mass()); + _h_5B_pipipi2[1]->fill((pi0[0].momentum()+pi0[3].momentum()-ptotal).mass()); + _h_5B_pipipi2[1]->fill((pi0[1].momentum()+pi0[2].momentum()-ptotal).mass()); + _h_5B_pipipi2[1]->fill((pi0[1].momentum()+pi0[3].momentum()-ptotal).mass()); + 
_h_5B_pipipi2[1]->fill((pi0[2].momentum()+pi0[3].momentum()-ptotal).mass()); + _h_5B_pipipipi2[0]->fill((ptotal-pim[0].momentum()).mass()); + _h_5B_pipipipi2[1]->fill((ptotal-pi0[0].momentum()).mass()); + _h_5B_pipipipi2[1]->fill((ptotal-pi0[1].momentum()).mass()); + _h_5B_pipipipi2[1]->fill((ptotal-pi0[2].momentum()).mass()); + _h_5B_pipipipi2[1]->fill((ptotal-pi0[3].momentum()).mass()); + _h_5B_q2->fill(ptotal.mass()); } // 3 pi- 2pi+ else if(pim.size()==3&&pip.size()==2) { FourMomentum ptotal = pim[0].momentum()+pim[1].momentum()+ pim[2].momentum()+pip[0].momentum()+pip[1].momentum(); - _h_5B_pipi3[0]->fill((pip[0].momentum()+pip[1].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[0].momentum()+pip[0].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[0].momentum()+pip[1].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[1].momentum()+pip[0].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[1].momentum()+pip[1].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[2].momentum()+pip[0].momentum()).mass(),weight); - _h_5B_pipi3[1]->fill((pim[2].momentum()+pip[1].momentum()).mass(),weight); - _h_5B_pipi3[2]->fill((pim[0].momentum()+pim[1].momentum()).mass(),weight); - _h_5B_pipi3[2]->fill((pim[0].momentum()+pim[2].momentum()).mass(),weight); - _h_5B_pipi3[2]->fill((pim[1].momentum()+pim[2].momentum()).mass(),weight); - _h_5B_pipipi3[0]->fill((pip[0].momentum()+pip[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[0].momentum()+pip[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[0].momentum()+pip[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[1].momentum()+pip[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[1].momentum()+pip[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[2].momentum()+pip[0].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[1]->fill((pim[2].momentum()+pip[1].momentum()-ptotal).mass(),weight); - 
_h_5B_pipipi3[2]->fill((pim[0].momentum()+pim[1].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[2]->fill((pim[0].momentum()+pim[2].momentum()-ptotal).mass(),weight); - _h_5B_pipipi3[2]->fill((pim[1].momentum()+pim[2].momentum()-ptotal).mass(),weight); - _h_5B_pipipipi3[0]->fill((ptotal-pim[0].momentum()).mass(),weight); - _h_5B_pipipipi3[0]->fill((ptotal-pim[1].momentum()).mass(),weight); - _h_5B_pipipipi3[0]->fill((ptotal-pim[2].momentum()).mass(),weight); - _h_5B_pipipipi3[1]->fill((ptotal-pip[0].momentum()).mass(),weight); - _h_5B_pipipipi3[1]->fill((ptotal-pip[1].momentum()).mass(),weight); - _h_5B_q3->fill(ptotal.mass(),weight); + _h_5B_pipi3[0]->fill((pip[0].momentum()+pip[1].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[0].momentum()+pip[0].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[0].momentum()+pip[1].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[1].momentum()+pip[0].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[1].momentum()+pip[1].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[2].momentum()+pip[0].momentum()).mass()); + _h_5B_pipi3[1]->fill((pim[2].momentum()+pip[1].momentum()).mass()); + _h_5B_pipi3[2]->fill((pim[0].momentum()+pim[1].momentum()).mass()); + _h_5B_pipi3[2]->fill((pim[0].momentum()+pim[2].momentum()).mass()); + _h_5B_pipi3[2]->fill((pim[1].momentum()+pim[2].momentum()).mass()); + _h_5B_pipipi3[0]->fill((pip[0].momentum()+pip[1].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[0].momentum()+pip[0].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[0].momentum()+pip[1].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[1].momentum()+pip[0].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[1].momentum()+pip[1].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[2].momentum()+pip[0].momentum()-ptotal).mass()); + _h_5B_pipipi3[1]->fill((pim[2].momentum()+pip[1].momentum()-ptotal).mass()); + _h_5B_pipipi3[2]->fill((pim[0].momentum()+pim[1].momentum()-ptotal).mass()); + 
_h_5B_pipipi3[2]->fill((pim[0].momentum()+pim[2].momentum()-ptotal).mass()); + _h_5B_pipipi3[2]->fill((pim[1].momentum()+pim[2].momentum()-ptotal).mass()); + _h_5B_pipipipi3[0]->fill((ptotal-pim[0].momentum()).mass()); + _h_5B_pipipipi3[0]->fill((ptotal-pim[1].momentum()).mass()); + _h_5B_pipipipi3[0]->fill((ptotal-pim[2].momentum()).mass()); + _h_5B_pipipipi3[1]->fill((ptotal-pip[0].momentum()).mass()); + _h_5B_pipipipi3[1]->fill((ptotal-pip[1].momentum()).mass()); + _h_5B_q3->fill(ptotal.mass()); } } } } /// Normalise histograms etc., after the run void finalize() { // leptonic normalize(_h_2B_m2enu); normalize(_h_2B_menu ); // 2 hadrons normalize(_h_2B_m2pipi); normalize(_h_2B_mpipi ); normalize(_h_2B_m2munu); normalize(_h_2B_mmunu ); normalize(_h_2B_m2KpiA); normalize(_h_2B_mKpiA ); normalize(_h_2B_m2KpiB); normalize(_h_2B_mKpiB ); normalize(_h_2B_m2Keta); normalize(_h_2B_mKeta ); normalize(_h_2B_m2KK ); normalize(_h_2B_mKK ); // 3 hadrons for(unsigned int ix=0;ix<4;++ix) { if(ix<3) { normalize(_h_3B_pippimpim [ix]); normalize(_h_3B_pi0pi0pim [ix]); normalize(_h_3B_pi0pi0km [ix]); normalize(_h_3B_kspimks [ix]); normalize(_h_3B_klpimkl [ix]); } normalize(_h_3B_kmpimkp [ix]); normalize(_h_3B_kmpi0k0 [ix]); normalize(_h_3B_kmpimpip [ix]); normalize(_h_3B_pimk0pi0 [ix]); normalize(_h_3B_pimpi0eta [ix]); normalize(_h_3B_pimpi0gamma[ix]); normalize(_h_3B_kspimkl [ix]); } // 4 pion decays for(unsigned int ix=0;ix<5;++ix) { normalize(_h_4B_pipi [ix]); normalize(_h_4B_pipipi[ix]); } normalize(_h_4B_pipi[5]); for(unsigned int ix=0;ix<2;++ix) { normalize(_h_4B_pipipipi[ix]); } // 5 pions normalize(_h_5B_q1); for(unsigned int ix=0;ix<5;++ix) { normalize(_h_5B_pipi1); normalize(_h_5B_pipipi1); } for(unsigned int ix=0;ix<3;++ix) { normalize(_h_5B_pipipipi1); } // 4 pi0 pi- normalize(_h_5B_q2); for(unsigned int ix=0;ix<2;++ix) { normalize(_h_5B_pipi2); normalize(_h_5B_pipipi2); normalize(_h_5B_pipipipi2); } // 3 pi- 2 pi+ normalize(_h_5B_q3); for(unsigned int ix=0;ix<3;++ix) 
{ normalize(_h_5B_pipi3); normalize(_h_5B_pipipi3); } for(unsigned int ix=0;ix<2;++ix) { normalize(_h_5B_pipipipi3); } } //@} /// @name Histograms //@{ // histograms for leptonic decay Histo1DPtr _h_2B_m2enu,_h_2B_menu; Histo1DPtr _h_2B_m2munu,_h_2B_mmunu; // histograms for 2 hadron decay Histo1DPtr _h_2B_m2pipi,_h_2B_mpipi; Histo1DPtr _h_2B_m2KpiA,_h_2B_m2KpiB,_h_2B_mKpiA,_h_2B_mKpiB; Histo1DPtr _h_2B_m2Keta,_h_2B_mKeta; Histo1DPtr _h_2B_m2KK,_h_2B_mKK; // histograms for 3 hadronc decay // Histograms for tau^- -> nu_tau pi^+pi^-pi^- vector _h_3B_pippimpim; //Histograms for tau^- -> nu_tau pi^0pi^0pi^- vector _h_3B_pi0pi0pim; // Histograms for tau^- -> nu_tau K^-K^+pi^- vector _h_3B_kmpimkp; // Histograms for tau^- -> nu_tau K^-K^0pi^0 vector _h_3B_kmpi0k0; // Histograms for tau^- -> nu_tau pi^0pi^0K^- vector _h_3B_pi0pi0km; // Histograms for tau^- -> nu_tau K^-pi^-pi^+ vector _h_3B_kmpimpip; // Histograms for tau^- -> nu_tau pi^-K^0pi^0 vector _h_3B_pimk0pi0; // Histograms for tau^- -> nu_tau pi^-pi^0eta vector _h_3B_pimpi0eta; // Histograms for tau^- -> nu_tau pi^-pi^0gamma vector _h_3B_pimpi0gamma; // Histograms for tau^- -> nu_tau K^0_SK^0_Spi^- vector _h_3B_kspimks; // Histograms for tau^- -> nu_tau K^0_LK^0_Lpi^- vector _h_3B_klpimkl; // Histograms for tau^- -> nu_tau K^0_SK^0_Lpi^- vector _h_3B_kspimkl; // histograms for 4 pion decay // Histograms for the pipi mass distributions vector _h_4B_pipi; // Histograms for the pipipi mass distributions vector _h_4B_pipipi; // Histograms for the pipipipi mass distributions vector _h_4B_pipipipi; // histograms for 5 pion decay // 2 pi0 2 pi- pi+ Histo1DPtr _h_5B_q1; vector _h_5B_pipi1; vector _h_5B_pipipi1; vector _h_5B_pipipipi1; // 4 pi0 pi- Histo1DPtr _h_5B_q2; vector _h_5B_pipi2; vector _h_5B_pipipi2; vector _h_5B_pipipipi2; // 3 pi- 2 pi+ Histo1DPtr _h_5B_q3; vector _h_5B_pipi3; vector _h_5B_pipipi3; vector _h_5B_pipipipi3; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_TAU_Decay); } diff --git 
a/analyses/pluginMC/MC_XS.cc b/analyses/pluginMC/MC_XS.cc --- a/analyses/pluginMC/MC_XS.cc +++ b/analyses/pluginMC/MC_XS.cc @@ -1,90 +1,91 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #ifndef ENABLE_HEPMC_3 #include "HepMC/HepMCDefs.h" #endif namespace Rivet { /// @brief Analysis for the generated cross section class MC_XS : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor MC_XS() : Analysis("MC_XS") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// @todo Convert to Scatter1D or Counter - _h_XS = bookScatter2D("XS"); - _h_N = bookHisto1D("N", 1, 0.0, 1.0); - _h_pmXS = bookHisto1D("pmXS", 2, -1.0, 1.0); - _h_pmN = bookHisto1D("pmN", 2, -1.0, 1.0); + book(_h_XS, "XS"); + book(_h_N, "N", 1, 0.0, 1.0); + book(_h_pmXS, "pmXS", 2, -1.0, 1.0); + book(_h_pmN, "pmN", 2, -1.0, 1.0); _mc_xs = _mc_error = 0.; } /// Perform the per-event analysis void analyze(const Event& event) { - _h_N->fill(0.5,1.); - _h_pmXS->fill(0.5*(event.weight() > 0 ? 1. : -1), abs(event.weight())); - _h_pmN ->fill(0.5*(event.weight() > 0 ? 1. : -1), 1.); + // *** LEIF *** This doesn't really make sense any more. 
+ _h_N->fill(0.5); + _h_pmXS->fill(0.5); + _h_pmN ->fill(0.5); #if defined ENABLE_HEPMC_3 //@todo HepMC3::GenCrossSection methods aren't const accessible :( RivetHepMC::GenCrossSection gcs = *(event.genEvent()->cross_section()); _mc_xs = gcs.xsec(); _mc_error = gcs.xsec_err(); #elif defined HEPMC_HAS_CROSS_SECTION _mc_xs = event.genEvent()->cross_section()->cross_section(); _mc_error = event.genEvent()->cross_section()->cross_section_error(); #endif // VERSION_CODE >= 3000000 } /// Normalise histograms etc., after the run void finalize() { scale(_h_pmXS, crossSection()/sumOfWeights()); #ifndef HEPMC_HAS_CROSS_SECTION _mc_xs = crossSection(); _mc_error = 0.0; #endif _h_XS->addPoint(0, _mc_xs, 0.5, _mc_error); } //@} private: /// @name Histograms //@{ Scatter2DPtr _h_XS; Histo1DPtr _h_N; Histo1DPtr _h_pmXS; Histo1DPtr _h_pmN; double _mc_xs, _mc_error; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_XS); } diff --git a/include/Rivet/Projections/DISRapidityGap.hh b/include/Rivet/Projections/DISRapidityGap.hh --- a/include/Rivet/Projections/DISRapidityGap.hh +++ b/include/Rivet/Projections/DISRapidityGap.hh @@ -1,94 +1,94 @@ // -*- C++ -*- #ifndef RIVET_DISRapidityGap_HH #define RIVET_DISRapidityGap_HH #include "Rivet/Projections/DISKinematics.hh" #include "Rivet/Projections/DISFinalState.hh" #include "Rivet/Particle.hh" #include "Rivet/Event.hh" namespace Rivet { /// @brief Get the incoming and outgoing hadron in a diffractive ep /// event. 
class DISRapidityGap : public Projection { public: /// Type of DIS boost to apply enum Frame { HCM, LAB, XCM }; DISRapidityGap() { setName("DISRapidityGap"); declare(DISKinematics(), "DISKIN"); - declare(DISFinalState(DISFinalState::HCM), "DISFS"); + declare(DISFinalState(DISFinalState::BoostFrame::HCM), "DISFS"); } DEFAULT_RIVET_PROJ_CLONE(DISRapidityGap); const double M2X() const {return _M2X;} const double M2Y() const {return _M2Y;} const double t() const {return _t;} const double gap() const {return _gap;} const double gapUpp() const {return _gapUpp;} const double gapLow() const {return _gapLow;} const double EpPzX(Frame f) const { if (f == LAB) return _ePpzX_LAB; else if (f == XCM) return _ePpzX_XCM; else return _ePpzX_HCM; } const double EmPzX(Frame f) const { if (f == LAB) return _eMpzX_LAB; else if (f == XCM) return _eMpzX_XCM; else return _eMpzX_HCM; } const FourMomentum pX(Frame f) const { if (f == LAB) return _momX_LAB; else if (f == XCM) return _momX_XCM; else return _momX_HCM; } const FourMomentum pY(Frame f) const { if (f == LAB) return _momY_LAB; else if (f == XCM) return _momY_XCM; else return _momY_HCM; } const Particles& systemX(Frame f) const { if (f == LAB) return _pX_LAB; else if (f == XCM) return _pX_XCM; else return _pX_HCM; } const Particles& systemY(Frame f) const { if (f == LAB) return _pY_LAB; else if (f == XCM) return _pY_XCM; else return _pY_HCM; } protected: virtual CmpState compare(const Projection& p) const; virtual void project(const Event& e); void clearAll(); void findgap(const Particles& particles, const DISKinematics& diskin); private: double _M2X, _M2Y, _t; double _gap, _gapUpp, _gapLow; double _ePpzX_LAB, _eMpzX_LAB; double _ePpzX_HCM, _eMpzX_HCM; double _ePpzX_XCM, _eMpzX_XCM; FourMomentum _momX_HCM, _momY_HCM; FourMomentum _momX_LAB, _momY_LAB; FourMomentum _momX_XCM, _momY_XCM; Particles _pX_HCM, _pY_HCM, _pX_LAB, _pY_LAB, _pX_XCM, _pY_XCM; }; } #endif