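The patch below touches seven ATLAS analyses in analyses/pluginATLAS/, migrating them from Rivet 2's explicit per-event weights to Rivet 3's implicit weight handling, and parks an eighth (ATLAS_2012_I1093734) by renaming it to .broken. In each file the dummy "const double weight = 1.0" lines are removed, weighted fill(x, weight) calls become fill(x), and, where present, plain double sum-of-weights members become booked CounterPtr objects filled once per accepted event. A minimal sketch of the recurring pattern (member names are illustrative, not taken from the patch):

// Rivet 2 idiom, removed throughout this patch:
//   double _sumw;                 // plain accumulator member
//   const double weight = 1.0;   // dummy per-event weight
//   _h->fill(x, weight);
//   _sumw += weight;
//
// Rivet 3 idiom, introduced throughout this patch:
//   CounterPtr _sumw;             // counter member
//   book(_sumw, "_sumw");         // booked in init()
//   _h->fill(x);                  // event weight applied internally
//   _sumw->fill();                // counter fill replaces "+= weight"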
diff --git a/analyses/pluginATLAS/ATLAS_2011_S9120807.cc b/analyses/pluginATLAS/ATLAS_2011_S9120807.cc
--- a/analyses/pluginATLAS/ATLAS_2011_S9120807.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_S9120807.cc
@@ -1,147 +1,146 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Measurement of isolated diphoton + X differential cross-sections
///
/// Inclusive isolated gamma gamma cross-sections, differential in M(gg), pT(gg), dphi(gg)
///
/// @author Giovanni Marchiori
class ATLAS_2011_S9120807 : public Analysis {
public:
/// Constructor
ATLAS_2011_S9120807()
: Analysis("ATLAS_2011_S9120807")
{ }
/// Book histograms and initialise projections before the run
void init() {
FinalState fs;
declare(fs, "FS");
FastJets fj(fs, FastJets::KT, 0.5);
fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec()));
declare(fj, "KtJetsD05");
IdentifiedFinalState photonfs(Cuts::abseta < 2.37 && Cuts::pT > 16*GeV);
photonfs.acceptId(PID::PHOTON);
declare(photonfs, "Photon");
book(_h_M ,1, 1, 1);
book(_h_pT ,2, 1, 1);
book(_h_dPhi ,3, 1, 1);
}
size_t getEtaBin(double eta) const {
const double aeta = fabs(eta);
return binIndex(aeta, _eta_bins_areaoffset);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Require at least 2 photons in final state
Particles photons = apply<IdentifiedFinalState>(event, "Photon").particlesByPt();
if (photons.size() < 2) vetoEvent;
// Compute jet pT densities
vector<double> ptDensity;
vector< vector<double> > ptDensities(_eta_bins_areaoffset.size()-1);
const shared_ptr<fastjet::ClusterSequenceArea> clust_seq_area = apply<FastJets>(event, "KtJetsD05").clusterSeqArea();
for (const Jet& jet : apply<FastJets>(event, "KtJetsD05").jets()) {
const double area = clust_seq_area->area(jet); // .pseudojet() called implicitly
/// @todo Should be 1e-4?
if (area > 10e-4 && jet.abseta() < _eta_bins_areaoffset.back()) {
ptDensities.at(getEtaBin(jet.abseta())).push_back(jet.pT()/area);
}
}
// Compute the median energy density
for (size_t b = 0; b < _eta_bins_areaoffset.size()-1; ++b) {
ptDensity += ptDensities[b].empty() ? 0 : median(ptDensities[b]);
}
// Loop over photons and fill vector of isolated ones
Particles isolated_photons;
for (const Particle& photon : photons) {
// Remove photons in crack
if (inRange(photon.abseta(), 1.37, 1.52)) continue;
// Standard ET cone isolation
const Particles& fs = apply<FinalState>(event, "FS").particles();
FourMomentum mom_in_EtCone;
for (const Particle& p : fs) {
// Check if it's inside the DeltaR = 0.4 cone
if (deltaR(photon, p) >= 0.4) continue;
// Veto if it's in the 5x7 central core
if (fabs(deltaEta(photon, p)) < 0.025*5.0*0.5 &&
fabs(deltaPhi(photon, p)) < (M_PI/128.)*7.0*0.5) continue;
// Increment isolation cone ET sum
mom_in_EtCone += p.momentum();
}
// Now figure out the correction (area*density)
const double ETCONE_AREA = M_PI*.4*.4 - (7.0*.025)*(5.0*M_PI/128.);
const double correction = ptDensity[getEtaBin(photon.abseta())] * ETCONE_AREA;
// Shouldn't need to subtract photon
// NB. Using expected cut at hadron/particle level, not cut at reco level
if (mom_in_EtCone.Et() - correction > 4.0*GeV) continue;
// Add to list of isolated photons
isolated_photons.push_back(photon);
}
// Require at least two isolated photons
if (isolated_photons.size() < 2) vetoEvent;
// Select leading pT pair
std::sort(isolated_photons.begin(), isolated_photons.end(), cmpMomByPt);
const FourMomentum y1 = isolated_photons[0].momentum();
const FourMomentum y2 = isolated_photons[1].momentum();
// Require the two photons to be separated (dR>0.4)
if (deltaR(y1, y2) < 0.4) vetoEvent;
const FourMomentum yy = y1 + y2;
const double Myy = yy.mass()/GeV;
const double pTyy = yy.pT()/GeV;
const double dPhiyy = deltaPhi(y1.phi(), y2.phi());
- const double weight = 1.0;
- _h_M->fill(Myy, weight);
- _h_pT->fill(pTyy, weight);
- _h_dPhi->fill(dPhiyy, weight);
+ _h_M->fill(Myy);
+ _h_pT->fill(pTyy);
+ _h_dPhi->fill(dPhiyy);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_M, crossSection()/sumOfWeights());
scale(_h_pT, crossSection()/sumOfWeights());
scale(_h_dPhi, crossSection()/sumOfWeights());
}
private:
Histo1DPtr _h_M, _h_pT, _h_dPhi;
const vector<double> _eta_bins_areaoffset = {0.0, 1.5, 3.0};
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2011_S9120807);
}
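A note on ATLAS_2011_S9120807's isolation correction: the median kT-jet pT density per |eta| bin is multiplied by the effective cone area ETCONE_AREA, i.e. the DeltaR = 0.4 cone minus the vetoed 5x7 core of 0.025 x (pi/128) cells. Checking the arithmetic (rounded values, for orientation only):

// M_PI*0.4*0.4                   ~ 0.5027   // full cone
// (5*0.025) * (7*M_PI/128.)      ~ 0.0215   // 5x7 core; equals the code's
//                                           // (7.0*.025)*(5.0*M_PI/128.) by commutativity
// ETCONE_AREA = 0.5027 - 0.0215  ~ 0.4812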
diff --git a/analyses/pluginATLAS/ATLAS_2011_S9128077.cc b/analyses/pluginATLAS/ATLAS_2011_S9128077.cc
--- a/analyses/pluginATLAS/ATLAS_2011_S9128077.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_S9128077.cc
@@ -1,222 +1,220 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
class ATLAS_2011_S9128077 : public Analysis {
public:
/// Constructor
ATLAS_2011_S9128077()
: Analysis("ATLAS_2011_S9128077")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Projections
const FinalState fs;
FastJets j4(fs, FastJets::ANTIKT, 0.4);
j4.useInvisibles();
declare(j4, "AntiKtJets04");
FastJets j6(fs, FastJets::ANTIKT, 0.6);
j6.useInvisibles();
declare(j6, "AntiKtJets06");
// Persistent histograms
book(_h_jet_multi_inclusive ,1, 1, 1);
book(_h_jet_multi_ratio, 2, 1, 1);
_h_jet_pT.resize(4);
book(_h_jet_pT[0] ,3, 1, 1);
book(_h_jet_pT[1] ,4, 1, 1);
book(_h_jet_pT[2] ,5, 1, 1);
book(_h_jet_pT[3] ,6, 1, 1);
book(_h_HT_2 ,7, 1, 1);
book(_h_HT_3 ,8, 1, 1);
book(_h_HT_4 ,9, 1, 1);
//
book(_h_pTlead_R06_60_ratio, 10, 1, 1);
book(_h_pTlead_R06_80_ratio, 11, 1, 1);
book(_h_pTlead_R06_110_ratio, 12, 1, 1);
book(_h_pTlead_R04_60_ratio, 13, 1, 1);
book(_h_pTlead_R04_80_ratio, 14, 1, 1);
book(_h_pTlead_R04_110_ratio, 15, 1, 1);
book(_h_HT2_R06_ratio, 16, 1, 1);
book(_h_HT2_R04_ratio, 17, 1, 1);
// Temporary histograms to be divided for the dsigma3/dsigma2 ratios
book(_h_tmp_pTlead_R06_60_2 , "TMP/pTlead_R06_60_2",  refData(10, 1, 1));
book(_h_tmp_pTlead_R06_80_2 , "TMP/pTlead_R06_80_2",  refData(11, 1, 1));
book(_h_tmp_pTlead_R06_110_2, "TMP/pTlead_R06_110_2", refData(12, 1, 1));
book(_h_tmp_pTlead_R06_60_3 , "TMP/pTlead_R06_60_3",  refData(10, 1, 1));
book(_h_tmp_pTlead_R06_80_3 , "TMP/pTlead_R06_80_3",  refData(11, 1, 1));
book(_h_tmp_pTlead_R06_110_3, "TMP/pTlead_R06_110_3", refData(12, 1, 1));
//
book(_h_tmp_pTlead_R04_60_2 , "TMP/pTlead_R04_60_2",  refData(13, 1, 1));
book(_h_tmp_pTlead_R04_80_2 , "TMP/pTlead_R04_80_2",  refData(14, 1, 1));
book(_h_tmp_pTlead_R04_110_2, "TMP/pTlead_R04_110_2", refData(15, 1, 1));
book(_h_tmp_pTlead_R04_60_3 , "TMP/pTlead_R04_60_3",  refData(13, 1, 1));
book(_h_tmp_pTlead_R04_80_3 , "TMP/pTlead_R04_80_3",  refData(14, 1, 1));
book(_h_tmp_pTlead_R04_110_3, "TMP/pTlead_R04_110_3", refData(15, 1, 1));
//
book(_h_tmp_HT2_R06_2, "TMP/HT2_R06_2", refData(16, 1, 1));
book(_h_tmp_HT2_R06_3, "TMP/HT2_R06_3", refData(16, 1, 1));
book(_h_tmp_HT2_R04_2, "TMP/HT2_R04_2", refData(17, 1, 1));
book(_h_tmp_HT2_R04_3, "TMP/HT2_R04_3", refData(17, 1, 1));
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
vector<FourMomentum> jets04;
foreach (const Jet& jet, apply<FastJets>(event, "AntiKtJets04").jetsByPt(60.0*GeV)) {
if (jet.abseta() < 2.8) {
jets04.push_back(jet.momentum());
}
}
if (jets04.size() > 1 && jets04[0].pT() > 80.0*GeV) {
for (size_t i = 2; i <= jets04.size(); ++i) {
- _h_jet_multi_inclusive->fill(i, weight);
+ _h_jet_multi_inclusive->fill(i);
}
double HT = 0.0;
for (size_t i = 0; i < jets04.size(); ++i) {
- if (i < _h_jet_pT.size()) _h_jet_pT[i]->fill(jets04[i].pT(), weight);
+ if (i < _h_jet_pT.size()) _h_jet_pT[i]->fill(jets04[i].pT());
HT += jets04[i].pT();
}
- if (jets04.size() >= 2) _h_HT_2->fill(HT, weight);
- if (jets04.size() >= 3) _h_HT_3->fill(HT, weight);
- if (jets04.size() >= 4) _h_HT_4->fill(HT, weight);
+ if (jets04.size() >= 2) _h_HT_2->fill(HT);
+ if (jets04.size() >= 3) _h_HT_3->fill(HT);
+ if (jets04.size() >= 4) _h_HT_4->fill(HT);
double pT1(jets04[0].pT()), pT2(jets04[1].pT());
double HT2 = pT1 + pT2;
if (jets04.size() >= 2) {
- _h_tmp_HT2_R04_2->fill(HT2, weight);
- _h_tmp_pTlead_R04_60_2->fill(pT1, weight);
- if (pT2 > 80.0*GeV) _h_tmp_pTlead_R04_80_2->fill(pT1, weight);
- if (pT2 > 110.0*GeV) _h_tmp_pTlead_R04_110_2->fill(pT1, weight);
+ _h_tmp_HT2_R04_2->fill(HT2);
+ _h_tmp_pTlead_R04_60_2->fill(pT1);
+ if (pT2 > 80.0*GeV) _h_tmp_pTlead_R04_80_2->fill(pT1);
+ if (pT2 > 110.0*GeV) _h_tmp_pTlead_R04_110_2->fill(pT1);
}
if (jets04.size() >= 3) {
double pT3(jets04[2].pT());
- _h_tmp_HT2_R04_3->fill(HT2, weight);
- _h_tmp_pTlead_R04_60_3->fill(pT1, weight);
- if (pT3 > 80.0*GeV) _h_tmp_pTlead_R04_80_3->fill(pT1, weight);
- if (pT3 > 110.0*GeV) _h_tmp_pTlead_R04_110_3->fill(pT1, weight);
+ _h_tmp_HT2_R04_3->fill(HT2);
+ _h_tmp_pTlead_R04_60_3->fill(pT1);
+ if (pT3 > 80.0*GeV) _h_tmp_pTlead_R04_80_3->fill(pT1);
+ if (pT3 > 110.0*GeV) _h_tmp_pTlead_R04_110_3->fill(pT1);
}
}
/// @todo It'd be better to avoid duplicating 95% of the code!
vector<FourMomentum> jets06;
foreach (const Jet& jet, apply<FastJets>(event, "AntiKtJets06").jetsByPt(60.0*GeV)) {
if (jet.abseta() < 2.8) {
jets06.push_back(jet.momentum());
}
}
if (jets06.size() > 1 && jets06[0].pT() > 80.0*GeV) {
double pT1(jets06[0].pT()), pT2(jets06[1].pT());
double HT2 = pT1 + pT2;
if (jets06.size() >= 2) {
- _h_tmp_HT2_R06_2->fill(HT2, weight);
- _h_tmp_pTlead_R06_60_2->fill(pT1, weight);
- if (pT2 > 80.0*GeV) _h_tmp_pTlead_R06_80_2->fill(pT1, weight);
- if (pT2 > 110.0*GeV) _h_tmp_pTlead_R06_110_2->fill(pT1, weight);
+ _h_tmp_HT2_R06_2->fill(HT2);
+ _h_tmp_pTlead_R06_60_2->fill(pT1);
+ if (pT2 > 80.0*GeV) _h_tmp_pTlead_R06_80_2->fill(pT1);
+ if (pT2 > 110.0*GeV) _h_tmp_pTlead_R06_110_2->fill(pT1);
}
if (jets06.size() >= 3) {
double pT3(jets06[2].pT());
- _h_tmp_HT2_R06_3->fill(HT2, weight);
- _h_tmp_pTlead_R06_60_3->fill(pT1, weight);
- if (pT3 > 80.0*GeV) _h_tmp_pTlead_R06_80_3->fill(pT1, weight);
- if (pT3 > 110.0*GeV) _h_tmp_pTlead_R06_110_3->fill(pT1, weight);
+ _h_tmp_HT2_R06_3->fill(HT2);
+ _h_tmp_pTlead_R06_60_3->fill(pT1);
+ if (pT3 > 80.0*GeV) _h_tmp_pTlead_R06_80_3->fill(pT1);
+ if (pT3 > 110.0*GeV) _h_tmp_pTlead_R06_110_3->fill(pT1);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
// Fill inclusive jet multiplicity ratio
Histo1D temphisto(refData(2, 1, 1));
for (size_t b = 0; b < temphisto.numBins(); ++b) {
if (_h_jet_multi_inclusive->bin(b).sumW() != 0) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
const double val = _h_jet_multi_inclusive->bin(b+1).sumW() / _h_jet_multi_inclusive->bin(b).sumW();
const double err = ( _h_jet_multi_inclusive->bin(b+1).relErr() + _h_jet_multi_inclusive->bin(b).relErr() ) * val;
_h_jet_multi_ratio->addPoint(x, val, ex, err);
}
}
// Normalize std histos
scale(_h_jet_multi_inclusive, crossSectionPerEvent());
scale(_h_jet_pT[0], crossSectionPerEvent());
scale(_h_jet_pT[1], crossSectionPerEvent());
scale(_h_jet_pT[2], crossSectionPerEvent());
scale(_h_jet_pT[3], crossSectionPerEvent());
scale(_h_HT_2, crossSectionPerEvent());
scale(_h_HT_3, crossSectionPerEvent());
scale(_h_HT_4, crossSectionPerEvent());
/// Create ratio histograms
divide(_h_tmp_pTlead_R06_60_3,_h_tmp_pTlead_R06_60_2, _h_pTlead_R06_60_ratio);
divide(_h_tmp_pTlead_R06_80_3,_h_tmp_pTlead_R06_80_2, _h_pTlead_R06_80_ratio);
divide(_h_tmp_pTlead_R06_110_3,_h_tmp_pTlead_R06_110_2, _h_pTlead_R06_110_ratio);
divide(_h_tmp_pTlead_R04_60_3,_h_tmp_pTlead_R04_60_2, _h_pTlead_R04_60_ratio);
divide(_h_tmp_pTlead_R04_80_3,_h_tmp_pTlead_R04_80_2, _h_pTlead_R04_80_ratio);
divide(_h_tmp_pTlead_R04_110_3,_h_tmp_pTlead_R04_110_2, _h_pTlead_R04_110_ratio);
divide(_h_tmp_HT2_R06_3,_h_tmp_HT2_R06_2, _h_HT2_R06_ratio);
divide(_h_tmp_HT2_R04_3,_h_tmp_HT2_R04_2, _h_HT2_R04_ratio);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_multi_inclusive;
Scatter2DPtr _h_jet_multi_ratio;
vector<Histo1DPtr> _h_jet_pT;
Histo1DPtr _h_HT_2;
Histo1DPtr _h_HT_3;
Histo1DPtr _h_HT_4;
//@}
/// @name Ratio histograms
//@{
Scatter2DPtr _h_pTlead_R06_60_ratio, _h_pTlead_R06_80_ratio, _h_pTlead_R06_110_ratio;
Scatter2DPtr _h_pTlead_R04_60_ratio, _h_pTlead_R04_80_ratio, _h_pTlead_R04_110_ratio;
Scatter2DPtr _h_HT2_R06_ratio, _h_HT2_R04_ratio;
//@}
/// @name Temporary histograms to be divided for the dsigma3/dsigma2 ratios
//@{
Histo1DPtr _h_tmp_pTlead_R06_60_2, _h_tmp_pTlead_R06_80_2, _h_tmp_pTlead_R06_110_2;
Histo1DPtr _h_tmp_pTlead_R06_60_3, _h_tmp_pTlead_R06_80_3, _h_tmp_pTlead_R06_110_3;
Histo1DPtr _h_tmp_pTlead_R04_60_2, _h_tmp_pTlead_R04_80_2, _h_tmp_pTlead_R04_110_2;
Histo1DPtr _h_tmp_pTlead_R04_60_3, _h_tmp_pTlead_R04_80_3, _h_tmp_pTlead_R04_110_3;
Histo1DPtr _h_tmp_HT2_R06_2, _h_tmp_HT2_R06_3, _h_tmp_HT2_R04_2, _h_tmp_HT2_R04_3;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2011_S9128077);
}
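In ATLAS_2011_S9128077's finalize(), the inclusive jet multiplicity ratio is assembled point by point from adjacent bins of the multiplicity histogram; written out, the quantities are:

// For each reference bin b:
//   val = sumW(bin b+1) / sumW(bin b)         // sigma(>= n+1 jets) / sigma(>= n jets)
//   err = val * (relErr(b+1) + relErr(b))     // relative errors added linearly,
//                                             // a conservative choice for correlated bins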
diff --git a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
--- a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
@@ -1,110 +1,109 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief ATLAS Z pT in Drell-Yan events at 7 TeV
/// @author Elena Yatsenko, Judith Katzy
class ATLAS_2011_S9131140 : public Analysis {
public:
/// Constructor
ATLAS_2011_S9131140()
: Analysis("ATLAS_2011_S9131140")
{
- _sumw_el_bare = 0;
- _sumw_el_dressed = 0;
- _sumw_mu_bare = 0;
- _sumw_mu_dressed = 0;
}
/// @name Analysis methods
//@{
void init() {
// Set up projections
FinalState fs;
Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV;
ZFinder zfinder_dressed_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_dressed_el, "ZFinder_dressed_el");
ZFinder zfinder_bare_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER);
declare(zfinder_bare_el, "ZFinder_bare_el");
ZFinder zfinder_dressed_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_dressed_mu, "ZFinder_dressed_mu");
ZFinder zfinder_bare_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER);
declare(zfinder_bare_mu, "ZFinder_bare_mu");
// Book histograms
book(_hist_zpt_el_dressed ,1, 1, 2); // electron "dressed"
book(_hist_zpt_el_bare ,1, 1, 3); // electron "bare"
book(_hist_zpt_mu_dressed ,2, 1, 2); // muon "dressed"
book(_hist_zpt_mu_bare ,2, 1, 3); // muon "bare"
+
+ book(_sumw_el_bare, "_sumw_el_bare");
+ book(_sumw_el_dressed, "_sumw_el_dressed");
+ book(_sumw_mu_bare, "_sumw_mu_bare");
+ book(_sumw_mu_dressed, "_sumw_mu_dressed");
}
/// Do the analysis
void analyze(const Event& evt) {
- const double weight = 1.0;
-
const ZFinder& zfinder_dressed_el = apply<ZFinder>(evt, "ZFinder_dressed_el");
if (!zfinder_dressed_el.bosons().empty()) {
- _sumw_el_dressed += weight;
+ _sumw_el_dressed->fill();
const FourMomentum pZ = zfinder_dressed_el.bosons()[0].momentum();
- _hist_zpt_el_dressed->fill(pZ.pT()/GeV, weight);
+ _hist_zpt_el_dressed->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_bare_el = apply<ZFinder>(evt, "ZFinder_bare_el");
if (!zfinder_bare_el.bosons().empty()) {
- _sumw_el_bare += weight;
+ _sumw_el_bare->fill();
const FourMomentum pZ = zfinder_bare_el.bosons()[0].momentum();
- _hist_zpt_el_bare->fill(pZ.pT()/GeV, weight);
+ _hist_zpt_el_bare->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_dressed_mu = apply<ZFinder>(evt, "ZFinder_dressed_mu");
if (!zfinder_dressed_mu.bosons().empty()) {
- _sumw_mu_dressed += weight;
+ _sumw_mu_dressed->fill();
const FourMomentum pZ = zfinder_dressed_mu.bosons()[0].momentum();
- _hist_zpt_mu_dressed->fill(pZ.pT()/GeV, weight);
+ _hist_zpt_mu_dressed->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_bare_mu = apply<ZFinder>(evt, "ZFinder_bare_mu");
if (!zfinder_bare_mu.bosons().empty()) {
- _sumw_mu_bare += weight;
+ _sumw_mu_bare->fill();
const FourMomentum pZ = zfinder_bare_mu.bosons()[0].momentum();
- _hist_zpt_mu_bare->fill(pZ.pT()/GeV, weight);
+ _hist_zpt_mu_bare->fill(pZ.pT()/GeV);
}
}
void finalize() {
if (_sumw_el_dressed != 0) scale(_hist_zpt_el_dressed, 1/_sumw_el_dressed);
if (_sumw_el_bare != 0) scale(_hist_zpt_el_bare, 1/_sumw_el_bare);
if (_sumw_mu_dressed != 0) scale(_hist_zpt_mu_dressed, 1/_sumw_mu_dressed);
if (_sumw_mu_bare != 0) scale(_hist_zpt_mu_bare, 1/_sumw_mu_bare);
}
//@}
private:
- double _sumw_el_bare, _sumw_el_dressed;
- double _sumw_mu_bare, _sumw_mu_dressed;
+ CounterPtr _sumw_el_bare, _sumw_el_dressed;
+ CounterPtr _sumw_mu_bare, _sumw_mu_dressed;
Histo1DPtr _hist_zpt_el_dressed;
Histo1DPtr _hist_zpt_el_bare;
Histo1DPtr _hist_zpt_mu_dressed;
Histo1DPtr _hist_zpt_mu_bare;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2011_S9131140);
}
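In ATLAS_2011_S9131140 each Z pT spectrum is normalised to unity by its own accepted-event count. The finalize() context lines still compare and divide by the members directly; whether that compiles against a given Rivet version after the change to CounterPtr depends on the available helper operators. An explicit equivalent would be (a sketch using YODA::Counter's sumW() accessor):

// e.g. for the dressed-electron channel:
if (_sumw_el_dressed->sumW() != 0)
  scale(_hist_zpt_el_dressed, 1.0/_sumw_el_dressed->sumW());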
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
@@ -1,147 +1,150 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class ATLAS_2012_I1082009 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ATLAS_2012_I1082009()
- : Analysis("ATLAS_2012_I1082009"),
- _weight25_30(0.),_weight30_40(0.),_weight40_50(0.),
- _weight50_60(0.),_weight60_70(0.),_weight25_70(0.)
+ : Analysis("ATLAS_2012_I1082009")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Input for the jets: No neutrinos, no muons
VetoedFinalState veto;
veto.addVetoPairId(PID::MUON);
veto.vetoNeutrinos();
FastJets jets(veto, FastJets::ANTIKT, 0.6);
declare(jets, "jets");
// unstable final-state for D*
declare(UnstableFinalState(), "UFS");
+ book(_weight25_30, "_weight_25_30");
+ book(_weight30_40, "_weight_30_40");
+ book(_weight40_50, "_weight_40_50");
+ book(_weight50_60, "_weight_50_60");
+ book(_weight60_70, "_weight_60_70");
+ book(_weight25_70, "_weight_25_70");
+
book(_h_pt25_30 , 8,1,1);
book(_h_pt30_40 , 9,1,1);
book(_h_pt40_50 ,10,1,1);
book(_h_pt50_60 ,11,1,1);
book(_h_pt60_70 ,12,1,1);
book(_h_pt25_70 ,13,1,1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
// get the jets
Jets jets;
foreach (const Jet& jet, apply<FastJets>(event, "jets").jetsByPt(25.0*GeV)) {
if ( jet.abseta() < 2.5 ) jets.push_back(jet);
}
// get the D* mesons
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
Particles Dstar;
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
if(id==413) Dstar.push_back(p);
}
// loop over the jets
foreach (const Jet& jet, jets ) {
double perp = jet.perp();
bool found = false;
double z(0.);
if(perp<25.||perp>70.) continue;
foreach(const Particle & p, Dstar) {
if(p.perp()<7.5) continue;
if(deltaR(p, jet.momentum())<0.6) {
Vector3 axis = jet.p3().unit();
z = axis.dot(p.p3())/jet.E();
if(z<0.3) continue;
found = true;
break;
}
}
- _weight25_70 += weight;
- if(found) _h_pt25_70->fill(z,weight);
+ _weight25_70->fill();
+ if(found) _h_pt25_70->fill(z);
if(perp>=25.&&perp<30.) {
- _weight25_30 += weight;
- if(found) _h_pt25_30->fill(z,weight);
+ _weight25_30->fill();
+ if(found) _h_pt25_30->fill(z);
}
else if(perp>=30.&&perp<40.) {
- _weight30_40 += weight;
- if(found) _h_pt30_40->fill(z,weight);
+ _weight30_40->fill();
+ if(found) _h_pt30_40->fill(z);
}
else if(perp>=40.&&perp<50.) {
- _weight40_50 += weight;
- if(found) _h_pt40_50->fill(z,weight);
+ _weight40_50->fill();
+ if(found) _h_pt40_50->fill(z);
}
else if(perp>=50.&&perp<60.) {
- _weight50_60 += weight;
- if(found) _h_pt50_60->fill(z,weight);
+ _weight50_60->fill();
+ if(found) _h_pt50_60->fill(z);
}
else if(perp>=60.&&perp<70.) {
- _weight60_70 += weight;
- if(found) _h_pt60_70->fill(z,weight);
+ _weight60_70->fill();
+ if(found) _h_pt60_70->fill(z);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_pt25_30,1./_weight25_30);
scale(_h_pt30_40,1./_weight30_40);
scale(_h_pt40_50,1./_weight40_50);
scale(_h_pt50_60,1./_weight50_60);
scale(_h_pt60_70,1./_weight60_70);
scale(_h_pt25_70,1./_weight25_70);
}
//@}
private:
/// @name Histograms
//@{
- double _weight25_30,_weight30_40,_weight40_50;
- double _weight50_60,_weight60_70,_weight25_70;
+ CounterPtr _weight25_30,_weight30_40,_weight40_50;
+ CounterPtr _weight50_60,_weight60_70,_weight25_70;
Histo1DPtr _h_pt25_30;
Histo1DPtr _h_pt30_40;
Histo1DPtr _h_pt40_50;
Histo1DPtr _h_pt50_60;
Histo1DPtr _h_pt60_70;
Histo1DPtr _h_pt25_70;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1082009);
}
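In ATLAS_2012_I1082009 a jet enters the numerator if it contains a D* candidate carrying a sizeable momentum fraction. Summarising the selection coded in the loop above (no new cuts, just restated):

// jets: anti-kT R = 0.6, 25 < pT < 70 GeV, |eta| < 2.5
// D*: |PDG id| = 413, pT > 7.5 GeV, DeltaR(D*, jet) < 0.6
// momentum fraction: z = (jet_axis . p3(Dstar)) / E(jet), required z >= 0.3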
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1083318.cc b/analyses/pluginATLAS/ATLAS_2012_I1083318.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1083318.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1083318.cc
@@ -1,251 +1,249 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
namespace Rivet {
/// ATLAS W + jets production at 7 TeV
class ATLAS_2012_I1083318 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2012_I1083318);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs;
IdentifiedFinalState allleptons;
allleptons.acceptIdPair(PID::ELECTRON);
allleptons.acceptIdPair(PID::MUON);
Cut cuts = Cuts::abseta < 2.5 && Cuts::pT > 20*GeV;
DressedLeptons leptons(fs, allleptons, 0.1, cuts);
declare(leptons, "leptons");
// Leading neutrinos for Etmiss
LeadingParticlesFinalState neutrinos(fs);
neutrinos.addParticleIdPair(PID::NU_E);
neutrinos.addParticleIdPair(PID::NU_MU);
neutrinos.setLeadingOnly(true);
declare(neutrinos, "neutrinos");
// Input for the jets: "Neutrinos, electrons, and muons from decays of the
// massive W boson were not used"
VetoedFinalState veto;
veto.addVetoOnThisFinalState(leptons);
veto.addVetoOnThisFinalState(neutrinos);
FastJets jets(veto, FastJets::ANTIKT, 0.4);
jets.useInvisibles(true);
declare(jets, "jets");
for (size_t i = 0; i < 2; ++i) {
book(_h_NjetIncl[i] ,1, 1, i+1);
book(_h_RatioNjetIncl[i], 2, 1, i+1);
book(_h_FirstJetPt_1jet[i] ,3, 1, i+1);
book(_h_FirstJetPt_2jet[i] ,4, 1, i+1);
book(_h_FirstJetPt_3jet[i] ,5, 1, i+1);
book(_h_FirstJetPt_4jet[i] ,6, 1, i+1);
book(_h_SecondJetPt_2jet[i] ,7, 1, i+1);
book(_h_SecondJetPt_3jet[i] ,8, 1, i+1);
book(_h_SecondJetPt_4jet[i] ,9, 1, i+1);
book(_h_ThirdJetPt_3jet[i] ,10, 1, i+1);
book(_h_ThirdJetPt_4jet[i] ,11, 1, i+1);
book(_h_FourthJetPt_4jet[i] ,12, 1, i+1);
book(_h_Ht_1jet[i] ,13, 1, i+1);
book(_h_Ht_2jet[i] ,14, 1, i+1);
book(_h_Ht_3jet[i] ,15, 1, i+1);
book(_h_Ht_4jet[i] ,16, 1, i+1);
book(_h_Minv_2jet[i] ,17, 1, i+1);
book(_h_Minv_3jet[i] ,18, 1, i+1);
book(_h_Minv_4jet[i] ,19, 1, i+1);
book(_h_JetRapidity[i] ,20, 1, i+1);
book(_h_DeltaYElecJet[i] ,21, 1, i+1);
book(_h_SumYElecJet[i] ,22, 1, i+1);
book(_h_DeltaR_2jet[i] ,23, 1, i+1);
book(_h_DeltaY_2jet[i] ,24, 1, i+1);
book(_h_DeltaPhi_2jet[i] ,25, 1, i+1);
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
const vector<DressedLepton>& leptons = apply<DressedLeptons>(event, "leptons").dressedLeptons();
Particles neutrinos = apply<FinalState>(event, "neutrinos").particlesByPt();
if (leptons.size() != 1 || (neutrinos.size() == 0)) {
vetoEvent;
}
FourMomentum lepton = leptons[0].momentum();
FourMomentum p_miss = neutrinos[0].momentum();
if (p_miss.Et() < 25.0*GeV) {
vetoEvent;
}
double mT = sqrt(2.0 * lepton.pT() * p_miss.Et() * (1.0 - cos( lepton.phi()-p_miss.phi()) ) );
if (mT < 40.0*GeV) {
vetoEvent;
}
double jetcuts[] = { 30.0*GeV, 20.0*GeV };
const FastJets& jetpro = apply<FastJets>(event, "jets");
for (size_t i = 0; i < 2; ++i) {
vector<FourMomentum> jets;
double HT = lepton.pT() + p_miss.pT();
foreach (const Jet& jet, jetpro.jetsByPt(jetcuts[i])) {
if (jet.absrap() < 4.4 && deltaR(lepton, jet.momentum()) > 0.5) {
jets.push_back(jet.momentum());
HT += jet.pT();
}
}
- _h_NjetIncl[i]->fill(0.0, weight);
+ _h_NjetIncl[i]->fill(0.0);
// Njet>=1 observables
if (jets.size() < 1) continue;
- _h_NjetIncl[i]->fill(1.0, weight);
- _h_FirstJetPt_1jet[i]->fill(jets[0].pT(), weight);
- _h_JetRapidity[i]->fill(jets[0].rapidity(), weight);
- _h_Ht_1jet[i]->fill(HT, weight);
- _h_DeltaYElecJet[i]->fill(lepton.rapidity()-jets[0].rapidity(), weight);
- _h_SumYElecJet[i]->fill(lepton.rapidity()+jets[0].rapidity(), weight);
+ _h_NjetIncl[i]->fill(1.0);
+ _h_FirstJetPt_1jet[i]->fill(jets[0].pT());
+ _h_JetRapidity[i]->fill(jets[0].rapidity());
+ _h_Ht_1jet[i]->fill(HT);
+ _h_DeltaYElecJet[i]->fill(lepton.rapidity()-jets[0].rapidity());
+ _h_SumYElecJet[i]->fill(lepton.rapidity()+jets[0].rapidity());
// Njet>=2 observables
if (jets.size() < 2) continue;
- _h_NjetIncl[i]->fill(2.0, weight);
- _h_FirstJetPt_2jet[i]->fill(jets[0].pT(), weight);
- _h_SecondJetPt_2jet[i]->fill(jets[1].pT(), weight);
- _h_Ht_2jet[i]->fill(HT, weight);
+ _h_NjetIncl[i]->fill(2.0);
+ _h_FirstJetPt_2jet[i]->fill(jets[0].pT());
+ _h_SecondJetPt_2jet[i]->fill(jets[1].pT());
+ _h_Ht_2jet[i]->fill(HT);
double m2_2jet = FourMomentum(jets[0]+jets[1]).mass2();
- _h_Minv_2jet[i]->fill(m2_2jet>0.0 ? sqrt(m2_2jet) : 0.0, weight);
- _h_DeltaR_2jet[i]->fill(deltaR(jets[0], jets[1]), weight);
- _h_DeltaY_2jet[i]->fill(jets[0].rapidity()-jets[1].rapidity(), weight);
- _h_DeltaPhi_2jet[i]->fill(deltaPhi(jets[0], jets[1]), weight);
+ _h_Minv_2jet[i]->fill(m2_2jet>0.0 ? sqrt(m2_2jet) : 0.0);
+ _h_DeltaR_2jet[i]->fill(deltaR(jets[0], jets[1]));
+ _h_DeltaY_2jet[i]->fill(jets[0].rapidity()-jets[1].rapidity());
+ _h_DeltaPhi_2jet[i]->fill(deltaPhi(jets[0], jets[1]));
// Njet>=3 observables
if (jets.size() < 3) continue;
- _h_NjetIncl[i]->fill(3.0, weight);
- _h_FirstJetPt_3jet[i]->fill(jets[0].pT(), weight);
- _h_SecondJetPt_3jet[i]->fill(jets[1].pT(), weight);
- _h_ThirdJetPt_3jet[i]->fill(jets[2].pT(), weight);
- _h_Ht_3jet[i]->fill(HT, weight);
+ _h_NjetIncl[i]->fill(3.0);
+ _h_FirstJetPt_3jet[i]->fill(jets[0].pT());
+ _h_SecondJetPt_3jet[i]->fill(jets[1].pT());
+ _h_ThirdJetPt_3jet[i]->fill(jets[2].pT());
+ _h_Ht_3jet[i]->fill(HT);
double m2_3jet = FourMomentum(jets[0]+jets[1]+jets[2]).mass2();
- _h_Minv_3jet[i]->fill(m2_3jet>0.0 ? sqrt(m2_3jet) : 0.0, weight);
+ _h_Minv_3jet[i]->fill(m2_3jet>0.0 ? sqrt(m2_3jet) : 0.0);
// Njet>=4 observables
if (jets.size() < 4) continue;
- _h_NjetIncl[i]->fill(4.0, weight);
- _h_FirstJetPt_4jet[i]->fill(jets[0].pT(), weight);
- _h_SecondJetPt_4jet[i]->fill(jets[1].pT(), weight);
- _h_ThirdJetPt_4jet[i]->fill(jets[2].pT(), weight);
- _h_FourthJetPt_4jet[i]->fill(jets[3].pT(), weight);
- _h_Ht_4jet[i]->fill(HT, weight);
+ _h_NjetIncl[i]->fill(4.0);
+ _h_FirstJetPt_4jet[i]->fill(jets[0].pT());
+ _h_SecondJetPt_4jet[i]->fill(jets[1].pT());
+ _h_ThirdJetPt_4jet[i]->fill(jets[2].pT());
+ _h_FourthJetPt_4jet[i]->fill(jets[3].pT());
+ _h_Ht_4jet[i]->fill(HT);
double m2_4jet = FourMomentum(jets[0]+jets[1]+jets[2]+jets[3]).mass2();
- _h_Minv_4jet[i]->fill(m2_4jet>0.0 ? sqrt(m2_4jet) : 0.0, weight);
+ _h_Minv_4jet[i]->fill(m2_4jet>0.0 ? sqrt(m2_4jet) : 0.0);
// Njet>=5 observables
if (jets.size() < 5) continue;
- _h_NjetIncl[i]->fill(5.0, weight);
+ _h_NjetIncl[i]->fill(5.0);
}
}
/// Normalise histograms etc., after the run
void finalize() {
for (size_t i = 0; i < 2; ++i) {
// Construct jet multiplicity ratio
for (size_t n = 1; n < _h_NjetIncl[i]->numBins(); ++n) {
YODA::HistoBin1D& b0 = _h_NjetIncl[i]->bin(n-1);
YODA::HistoBin1D& b1 = _h_NjetIncl[i]->bin(n);
if (b0.height() == 0.0 || b1.height() == 0.0) continue;
_h_RatioNjetIncl[i]->addPoint(n, b1.height()/b0.height(), 0.5,
b1.height()/b0.height() * (b0.relErr() + b1.relErr()));
}
// Scale all histos to the cross section
const double factor = crossSection()/sumOfWeights();
scale(_h_DeltaPhi_2jet[i], factor);
scale(_h_DeltaR_2jet[i], factor);
scale(_h_DeltaY_2jet[i], factor);
scale(_h_DeltaYElecJet[i], factor);
scale(_h_FirstJetPt_1jet[i], factor);
scale(_h_FirstJetPt_2jet[i], factor);
scale(_h_FirstJetPt_3jet[i], factor);
scale(_h_FirstJetPt_4jet[i], factor);
scale(_h_FourthJetPt_4jet[i], factor);
scale(_h_Ht_1jet[i], factor);
scale(_h_Ht_2jet[i], factor);
scale(_h_Ht_3jet[i], factor);
scale(_h_Ht_4jet[i], factor);
scale(_h_JetRapidity[i], factor);
scale(_h_Minv_2jet[i], factor);
scale(_h_Minv_3jet[i], factor);
scale(_h_Minv_4jet[i], factor);
scale(_h_NjetIncl[i], factor);
scale(_h_SecondJetPt_2jet[i], factor);
scale(_h_SecondJetPt_3jet[i], factor);
scale(_h_SecondJetPt_4jet[i], factor);
scale(_h_SumYElecJet[i], factor);
scale(_h_ThirdJetPt_3jet[i], factor);
scale(_h_ThirdJetPt_4jet[i], factor);
}
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_DeltaPhi_2jet[2];
Histo1DPtr _h_DeltaR_2jet[2];
Histo1DPtr _h_DeltaY_2jet[2];
Histo1DPtr _h_DeltaYElecJet[2];
Histo1DPtr _h_FirstJetPt_1jet[2];
Histo1DPtr _h_FirstJetPt_2jet[2];
Histo1DPtr _h_FirstJetPt_3jet[2];
Histo1DPtr _h_FirstJetPt_4jet[2];
Histo1DPtr _h_FourthJetPt_4jet[2];
Histo1DPtr _h_Ht_1jet[2];
Histo1DPtr _h_Ht_2jet[2];
Histo1DPtr _h_Ht_3jet[2];
Histo1DPtr _h_Ht_4jet[2];
Histo1DPtr _h_JetRapidity[2];
Histo1DPtr _h_Minv_2jet[2];
Histo1DPtr _h_Minv_3jet[2];
Histo1DPtr _h_Minv_4jet[2];
Histo1DPtr _h_NjetIncl[2];
Scatter2DPtr _h_RatioNjetIncl[2];
Histo1DPtr _h_SecondJetPt_2jet[2];
Histo1DPtr _h_SecondJetPt_3jet[2];
Histo1DPtr _h_SecondJetPt_4jet[2];
Histo1DPtr _h_SumYElecJet[2];
Histo1DPtr _h_ThirdJetPt_3jet[2];
Histo1DPtr _h_ThirdJetPt_4jet[2];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1083318);
}
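ATLAS_2012_I1083318 selects W events with exactly one dressed lepton plus leading-neutrino missing momentum, and runs the whole jet analysis twice (i = 0, 1) for the 30 and 20 GeV jet thresholds. The transverse-mass cut as computed in analyze():

// mT = sqrt( 2 * pT(lepton) * ET(miss) * (1 - cos(phi(lepton) - phi(miss))) )
// with ET(miss) > 25 GeV required before the cut and mT > 40 GeV required to pass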
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1084540.cc b/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1084540.cc
@@ -1,231 +1,230 @@
// -*- C++ -*-
/**
* @name ATLAS Diffractive Gaps Rivet Analysis
* @author Tim Martin, tim.martin@cern.ch
* @version 1.0
* @date 16/01/2012
* @see http://arxiv.org/abs/1201.2808
* @note pp, sqrt(s) = 7 TeV
* @note Rapidity gap finding algorithm designed to complement
* the ATLAS detector acceptance. Forward rapidity gap sizes
* are calculated for each event, considering all stable
* particles above pT cut values of 200, 400, 600 and 800 MeV in
* turn. A forward rapidity gap is defined to be the largest
* continuous region stretching inward from either edge of the
* detector at |eta| = 4.9 which contains zero particles above
* the pT cut. Soft diffractive topologies are isolated at large
* gap sizes.
*
*/
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
namespace Rivet {
class ATLAS_2012_I1084540 : public Analysis {
public:
ATLAS_2012_I1084540() : Analysis("ATLAS_2012_I1084540") {}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
//All final states. Rapidity range = ATLAS calorimetry. Lowest pT cut = 200 MeV.
const FinalState cnfs2(-_etaMax, _etaMax, 0.2 * GeV);
const FinalState cnfs4(-_etaMax, _etaMax, 0.4 * GeV);
const FinalState cnfs6(-_etaMax, _etaMax, 0.6 * GeV);
const FinalState cnfs8(-_etaMax, _etaMax, 0.8 * GeV);
declare(cnfs2, "CNFS2");
declare(cnfs4, "CNFS4");
declare(cnfs6, "CNFS6");
declare(cnfs8, "CNFS8");
_etaBinSize = (2. * _etaMax)/(double)_etaBins;
//Book histogram
book(_h_DeltaEtaF_200 ,1, 1, 1);
book(_h_DeltaEtaF_400 ,2, 1, 1);
book(_h_DeltaEtaF_600 ,3, 1, 1);
book(_h_DeltaEtaF_800 ,4, 1, 1);
}
private:
void fillMap(const FinalState& fs, bool* energyMap, double pTcut) {
// Fill true/false array by iterating over particles and compare their
// pT with pTcut
foreach (const Particle& p, fs.particles(cmpMomByEta)) {
int checkBin = -1;
double checkEta = -_etaMax;
while (1) {
checkEta += _etaBinSize;
++checkBin;
if (p.eta() < checkEta) {
energyMap[checkBin] = (p.pT() > pTcut * GeV);
break;
}
}
}
}
public:
/// Perform the per-event analysis
void analyze(const Event& event) {
static unsigned int event_count = 0;
++event_count;
- const double weight = 1.0;
const FinalState& fs2 = apply<FinalState>(event, "CNFS2");
const FinalState& fs4 = apply<FinalState>(event, "CNFS4");
const FinalState& fs6 = apply<FinalState>(event, "CNFS6");
const FinalState& fs8 = apply<FinalState>(event, "CNFS8");
// Set up Yes/No arrays for energy in each eta bin at each pT cut
bool energyMap_200[_etaBins];
bool energyMap_400[_etaBins];
bool energyMap_600[_etaBins];
bool energyMap_800[_etaBins];
for (int i = 0; i < _etaBins; ++i) {
energyMap_200[i] = false;
energyMap_400[i] = false;
energyMap_600[i] = false;
energyMap_800[i] = false;
}
// Veto bins based on final state particles > Cut (Where Cut = 200 - 800 MeV pT)
fillMap(fs2, energyMap_200, 0.2);
fillMap(fs4, energyMap_400, 0.4);
fillMap(fs6, energyMap_600, 0.6);
fillMap(fs8, energyMap_800, 0.8);
// Apply gap finding algorithm
// Detector layout follows...
// -Eta [Proton --- DetectorCSide --- DetectorBarrel --- DetectorASide --- Proton] +Eta
bool gapDirectionAt200 = false; // False means the gap is on the C side, true means it is on the A side.
double largestEdgeGap_200 = 0.;
double largestEdgeGap_400 = 0.;
double largestEdgeGap_600 = 0.;
double largestEdgeGap_800 = 0.;
for (int E = 200; E <= 800; E += 200) {
double EdgeGapSizeA = -1, EdgeGapSizeC = -1;
bool* energyMap = 0;
switch (E) {
case 200: energyMap = energyMap_200; break;
case 400: energyMap = energyMap_400; break;
case 600: energyMap = energyMap_600; break;
case 800: energyMap = energyMap_800; break;
}
// Look left to right
for (int a = 0; a < _etaBins; ++a) {
if (energyMap[a] == true) {
EdgeGapSizeA = (_etaBinSize * a);
break;
}
}
// And look right to left
for (int c = _etaBins-1; c >= 0; --c) {
if (energyMap[c] == true) {
EdgeGapSizeC = (2 * _etaMax) - (_etaBinSize * (c+1));
if (fuzzyEquals(EdgeGapSizeC, 4.47035e-08)) EdgeGapSizeC = 0.0;
break;
}
}
// Put your hands on your hips
// Find the largest gap
double largestEdgeGap = 0.;
if (E == 200) {
// For the 200 MeV pass, take the bigger of the two gaps and note which side it is on for the higher pT cuts.
largestEdgeGap = std::max(EdgeGapSizeA,EdgeGapSizeC);
gapDirectionAt200 = (EdgeGapSizeA > EdgeGapSizeC);
} else {
// Use the direction from the 200 MeV pass, the most accurate measure of which side the gap is on.
if (gapDirectionAt200) {
largestEdgeGap = EdgeGapSizeA;
}
else largestEdgeGap = EdgeGapSizeC;
}
// Check case of empty detector
if (largestEdgeGap < 0.0) largestEdgeGap = 2.0 * _etaMax;
// Fill bin centre
switch (E) {
- case 200: _h_DeltaEtaF_200->fill(largestEdgeGap + _etaBinSize/2., weight); break;
- case 400: _h_DeltaEtaF_400->fill(largestEdgeGap + _etaBinSize/2., weight); break;
- case 600: _h_DeltaEtaF_600->fill(largestEdgeGap + _etaBinSize/2., weight); break;
- case 800: _h_DeltaEtaF_800->fill(largestEdgeGap + _etaBinSize/2., weight); break;
+ case 200: _h_DeltaEtaF_200->fill(largestEdgeGap + _etaBinSize/2.); break;
+ case 400: _h_DeltaEtaF_400->fill(largestEdgeGap + _etaBinSize/2.); break;
+ case 600: _h_DeltaEtaF_600->fill(largestEdgeGap + _etaBinSize/2.); break;
+ case 800: _h_DeltaEtaF_800->fill(largestEdgeGap + _etaBinSize/2.); break;
}
if (E == 200) largestEdgeGap_200 = largestEdgeGap;
if (E == 400) largestEdgeGap_400 = largestEdgeGap;
if (E == 600) largestEdgeGap_600 = largestEdgeGap;
if (E == 800) largestEdgeGap_800 = largestEdgeGap;
}
// Algorithm result every 1000 events
if (event_count % 1000 == 0) {
for (int E = 200; E <= 800; E += 200) {
bool* energyMap = 0;
double largestEdgeGap = 0;
switch (E) {
case 200: energyMap = energyMap_200; largestEdgeGap = largestEdgeGap_200; break;
case 400: energyMap = energyMap_400; largestEdgeGap = largestEdgeGap_400; break;
case 600: energyMap = energyMap_600; largestEdgeGap = largestEdgeGap_600; break;
case 800: energyMap = energyMap_800; largestEdgeGap = largestEdgeGap_800; break;
}
MSG_DEBUG("Largest Forward Gap at pT Cut " << E << " MeV=" << largestEdgeGap
<< " eta, NFinalState pT > 200 in ATLAS acceptance:" << fs2.particles().size());
std::string hitPattern = "Detector HitPattern=-4.9[";
for (int a = 0; a < _etaBins; ++a) {
if (energyMap[a] == true) hitPattern += "X";
else hitPattern += "_";
}
hitPattern += "]4.9";
MSG_DEBUG(hitPattern);
std::string gapArrow = " ";
if (!gapDirectionAt200) {
int drawSpaces = (int)(_etaBins - (largestEdgeGap/_etaBinSize) + 0.5);
for (int i = 0; i < drawSpaces; ++i) gapArrow += " ";
}
int drawArrows = (int)((largestEdgeGap/_etaBinSize) + 0.5);
for (int i = 0; i < drawArrows; ++i) gapArrow += "^";
MSG_DEBUG(gapArrow);
}
}
}
/// Normalise histograms after the run, Scale to cross section
void finalize() {
MSG_DEBUG("Cross Section=" << crossSection() / millibarn << "mb, SumOfWeights=" << sumOfWeights());
scale(_h_DeltaEtaF_200, (crossSection() / millibarn)/sumOfWeights());
scale(_h_DeltaEtaF_400, (crossSection() / millibarn)/sumOfWeights());
scale(_h_DeltaEtaF_600, (crossSection() / millibarn)/sumOfWeights());
scale(_h_DeltaEtaF_800, (crossSection() / millibarn)/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_DeltaEtaF_200;
Histo1DPtr _h_DeltaEtaF_400;
Histo1DPtr _h_DeltaEtaF_600;
Histo1DPtr _h_DeltaEtaF_800;
//@}
/// @name Private variables
//@{
static constexpr int _etaBins = 49;
static constexpr double _etaMax = 4.9;
double _etaBinSize;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1084540);
}
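ATLAS_2012_I1084540's gap finder reduces each event to a 49-bin occupancy map over |eta| < 4.9 (bin width 9.8/49 = 0.2) per pT threshold. Restating the algorithm implemented above:

// 1. Mark an eta bin occupied if it contains a particle above the pT cut.
// 2. Scan inward from each detector edge to the first occupied bin; the
//    distance covered is that edge's gap.
// 3. At 200 MeV keep the larger of the two edge gaps and record its side;
//    the 400-800 MeV passes reuse the side found at 200 MeV.
// 4. An entirely empty map yields the maximal gap, 2 * 4.9 = 9.8.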
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1091481.cc b/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
@@ -1,179 +1,177 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ATLAS_2012_I1091481 : public Analysis {
public:
/// Constructor
ATLAS_2012_I1091481()
: Analysis("ATLAS_2012_I1091481")
{ }
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs100(Cuts::abseta < 2.5 && Cuts::pT > 0.1*GeV);
declare(cfs100,"CFS100");
ChargedFinalState cfs500(Cuts::abseta < 2.5 && Cuts::pT > 0.5*GeV);
declare(cfs500,"CFS500");
// collision energy
int isqrts = -1;
if (fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 2;
if (fuzzyEquals(sqrtS(), 7*TeV)) isqrts = 1;
assert(isqrts > 0);
book(_sE_10_100 ,isqrts, 1, 1);
book(_sE_1_100 ,isqrts, 1, 2);
book(_sE_10_500 ,isqrts, 1, 3);
book(_sEta_10_100 ,isqrts, 2, 1);
book(_sEta_1_100 ,isqrts, 2, 2);
book(_sEta_10_500 ,isqrts, 2, 3);
- norm_inclusive = 0.;
- norm_lowPt = 0.;
- norm_pt500 = 0.;
+ book(norm_inclusive, "norm_inclusive");
+ book(norm_lowPt, "norm_lowPt");
+ book(norm_pt500, "norm_pt500");
}
// Recalculate particle energy assuming pion mass
double getPionEnergy(const Particle& p) {
double m_pi = 0.1396*GeV;
double p2 = p.p3().mod2()/(GeV*GeV);
return sqrt(sqr(m_pi) + p2);
}
// S_eta core for one event
//
// -1 + 1/Nch * |sum_j^Nch exp[i*(xi eta_j - Phi_j)]|^2
//
double getSeta(const Particles& part, double xi) {
std::complex<double> c_eta (0.0, 0.0);
foreach (const Particle& p, part) {
double eta = p.eta();
double phi = p.phi();
double arg = xi*eta-phi;
std::complex<double> temp(cos(arg), sin(arg));
c_eta += temp;
}
return std::norm(c_eta)/part.size() - 1.0;
}
// S_E core for one event
//
// -1 + 1/Nch * |sum_j^Nch exp[i*(omega X_j - Phi_j)]|^2
//
double getSE(const Particles& part, double omega) {
double Xj = 0.0;
std::complex<double> c_E (0.0, 0.0);
for (unsigned int i=0; i < part.size(); ++i) {
Xj += 0.5*getPionEnergy(part[i]);
double phi = part[i].phi();
double arg = omega*Xj - phi;
std::complex<double> temp(cos(arg), sin(arg));
c_E += temp;
Xj += 0.5*getPionEnergy(part[i]);
}
return std::norm(c_E)/part.size() - 1.0;
}
// Convenient fill function
- void fillS(Histo1DPtr h, const Particles& part, double weight, bool SE=true) {
+ void fillS(Histo1DPtr h, const Particles& part, bool SE=true) {
// Loop over bins, take bin centers as parameter values
for(size_t i=0; i < h->numBins(); ++i) {
double x = h->bin(i).xMid();
double width = h->bin(i).xMax() - h->bin(i).xMin();
double y;
if(SE) y = getSE(part, x);
else y = getSeta(part, x);
- h->fill(x, y * width * weight);
+ h->fill(x, y * width);
// Histo1D objects will be converted to Scatter2D objects for plotting
// As part of this conversion, Rivet will divide by bin width
// However, we want the (x,y) of the Scatter2D to be the (binCenter, sumW) of
// the current Histo1D. This is why in the above line we multiply by bin width,
// so as to undo later division by bin width.
//
// Could have used Scatter2D objects in the first place, but they cannot be merged
// as easily as Histo1Ds can using yodamerge (missing ScaledBy attribute)
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = 1.0;
-
// Charged fs
const ChargedFinalState& cfs100 = apply<ChargedFinalState>(event, "CFS100");
const Particles part100 = cfs100.particles(cmpMomByEta);
const ChargedFinalState& cfs500 = apply<ChargedFinalState>(event, "CFS500");
const Particles& part500 = cfs500.particles(cmpMomByEta);
// Veto the event if the most inclusive phase space has fewer than 11 particles or its leading pT is above 10 GeV
if (part100.size() < 11) vetoEvent;
double ptmax = cfs100.particlesByPt()[0].pT()/GeV;
if (ptmax > 10.0) vetoEvent;
// Fill the pt>100, pTmax<10 GeV histos
- fillS(_sE_10_100, part100, weight, true);
- fillS(_sEta_10_100, part100, weight, false);
- norm_inclusive += weight;
+ fillS(_sE_10_100, part100, true);
+ fillS(_sEta_10_100, part100, false);
+ norm_inclusive->fill();
// Fill the pt>100, pTmax<1 GeV histos
if (ptmax < 1.0) {
- fillS(_sE_1_100, part100, weight, true);
- fillS(_sEta_1_100, part100, weight, false);
- norm_lowPt += weight;
+ fillS(_sE_1_100, part100, true);
+ fillS(_sEta_1_100, part100, false);
+ norm_lowPt->fill();
}
// Fill the pt>500, pTmax<10 GeV histos
if (part500.size() > 10) {
- fillS(_sE_10_500, part500, weight, true );
- fillS(_sEta_10_500, part500, weight, false);
- norm_pt500 += weight;
+ fillS(_sE_10_500, part500, true );
+ fillS(_sEta_10_500, part500, false);
+ norm_pt500->fill();
}
}
/// Normalise histograms etc., after the run
void finalize() {
// The scaling takes the multiple fills per event into account
scale(_sE_10_100, 1.0/norm_inclusive);
scale(_sE_1_100 , 1.0/norm_lowPt);
scale(_sE_10_500, 1.0/norm_pt500);
scale(_sEta_10_100, 1.0/norm_inclusive);
scale(_sEta_1_100 , 1.0/norm_lowPt);
scale(_sEta_10_500, 1.0/norm_pt500);
}
//@}
private:
Histo1DPtr _sE_10_100;
Histo1DPtr _sE_1_100;
Histo1DPtr _sE_10_500;
Histo1DPtr _sEta_10_100;
Histo1DPtr _sEta_1_100;
Histo1DPtr _sEta_10_500;
- double norm_inclusive;
- double norm_lowPt;
- double norm_pt500;
+ CounterPtr norm_inclusive;
+ CounterPtr norm_lowPt;
+ CounterPtr norm_pt500;
};
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1091481);
}
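The two event-shape cores in ATLAS_2012_I1091481, written out from the inline comments (same notation as the code):

// S_eta(xi)  = -1 + (1/Nch) * | sum_j exp[ i*(xi*eta_j - phi_j) ] |^2
// S_E(omega) = -1 + (1/Nch) * | sum_j exp[ i*(omega*X_j - phi_j) ] |^2
// X_j is the running sum of pion-hypothesis energies along the eta-ordered
// chain, evaluated at the midpoint of particle j's interval (hence the two
// half-steps of getPionEnergy() around the fill in getSE()).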
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1093734.cc b/analyses/pluginATLAS/ATLAS_2012_I1093734.cc.broken
rename from analyses/pluginATLAS/ATLAS_2012_I1093734.cc
rename to analyses/pluginATLAS/ATLAS_2012_I1093734.cc.broken