Page MenuHomeHEPForge

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/analyses/pluginALICE/ALICE_2010_S8624100.cc b/analyses/pluginALICE/ALICE_2010_S8624100.cc
--- a/analyses/pluginALICE/ALICE_2010_S8624100.cc
+++ b/analyses/pluginALICE/ALICE_2010_S8624100.cc
@@ -1,94 +1,92 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2010_S8624100 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ALICE_2010_S8624100()
: Analysis("ALICE_2010_S8624100")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs05(-0.5, 0.5);
ChargedFinalState cfs10(-1.0, 1.0);
ChargedFinalState cfs13(-1.3, 1.3);
declare(cfs05, "CFS05");
declare(cfs10, "CFS10");
declare(cfs13, "CFS13");
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_h_dN_dNch_05 ,11, 1, 1);
book(_h_dN_dNch_10 ,12, 1, 1);
book(_h_dN_dNch_13 ,13, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 2360, 1E-3)) {
book(_h_dN_dNch_05 ,17, 1, 1);
book(_h_dN_dNch_10 ,18, 1, 1);
book(_h_dN_dNch_13 ,19, 1, 1);
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
- const ChargedFinalState& charged_05 = apply<ChargedFinalState>(event, "CFS05");
+ const ChargedFinalState& charged_05 = apply<ChargedFinalState>(event, "CFS05");
const ChargedFinalState& charged_10 = apply<ChargedFinalState>(event, "CFS10");
const ChargedFinalState& charged_13 = apply<ChargedFinalState>(event, "CFS13");
- _h_dN_dNch_05->fill(charged_05.size(), weight);
- _h_dN_dNch_10->fill(charged_10.size(), weight);
- _h_dN_dNch_13->fill(charged_13.size(), weight);
+ _h_dN_dNch_05->fill(charged_05.size());
+ _h_dN_dNch_10->fill(charged_10.size());
+ _h_dN_dNch_13->fill(charged_13.size());
}
/// Normalise histograms etc., after the run
void finalize() {
normalize(_h_dN_dNch_05);
normalize(_h_dN_dNch_10);
normalize(_h_dN_dNch_13);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_dN_dNch_05;
Histo1DPtr _h_dN_dNch_10;
Histo1DPtr _h_dN_dNch_13;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2010_S8624100);
}
diff --git a/analyses/pluginALICE/ALICE_2010_S8625980.cc b/analyses/pluginALICE/ALICE_2010_S8625980.cc
--- a/analyses/pluginALICE/ALICE_2010_S8625980.cc
+++ b/analyses/pluginALICE/ALICE_2010_S8625980.cc
@@ -1,99 +1,97 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2010_S8625980 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ALICE_2010_S8625980()
- : Analysis("ALICE_2010_S8625980"),
- _Nevt_after_cuts(0.0)
+ : Analysis("ALICE_2010_S8625980")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs(-1.0, 1.0);
declare(cfs, "CFS");
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_h_dN_deta ,4, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 2360, 1E-3)) {
book(_h_dN_deta ,5, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
book(_h_dN_deta ,6, 1, 1);
book(_h_dN_dNch ,3, 1, 1);
}
+ book(_Nevt_after_cuts, "Nevt_after_cuts");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
if (charged.size() < 1) {
vetoEvent;
}
- _Nevt_after_cuts += weight;
+ _Nevt_after_cuts->fill();
foreach (const Particle& p, charged.particles()) {
const double eta = p.eta();
- _h_dN_deta->fill(eta, weight);
+ _h_dN_deta->fill(eta);
}
if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
- _h_dN_dNch->fill(charged.size(), weight);
+ _h_dN_dNch->fill(charged.size());
}
}
/// Normalise histograms etc., after the run
void finalize() {
if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
normalize(_h_dN_dNch);
}
scale(_h_dN_deta, 1.0/_Nevt_after_cuts);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_dN_deta;
Histo1DPtr _h_dN_dNch;
- double _Nevt_after_cuts;
+ CounterPtr _Nevt_after_cuts;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2010_S8625980);
}
diff --git a/analyses/pluginALICE/ALICE_2010_S8706239.cc b/analyses/pluginALICE/ALICE_2010_S8706239.cc
--- a/analyses/pluginALICE/ALICE_2010_S8706239.cc
+++ b/analyses/pluginALICE/ALICE_2010_S8706239.cc
@@ -1,101 +1,100 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2010_S8706239 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ALICE_2010_S8706239()
- : Analysis("ALICE_2010_S8706239"),
- _Nevt_after_cuts(0.0)
+ : Analysis("ALICE_2010_S8706239")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs(-0.8, 0.8, 0.15);
declare(cfs, "CFS");
book(_h_pT ,4, 1, 1);
book(_h_pT_Nch_015 ,11, 1, 1);
book(_h_pT_Nch_05 ,12, 1, 1);
+ book(_Nevt_after_cuts,"Nevt_after_cuts");
+
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
- _Nevt_after_cuts += weight;
+ _Nevt_after_cuts->fill();
// Get number of particles that fulfill certain pT requirements
int Nch_015 = 0;
int Nch_05 = 0;
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
if (pT < 4.0) Nch_015++;
if (pT > 0.5 && pT < 4.0) Nch_05++;
}
// Now we can fill histograms
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
- if (pT < 4.0) _h_pT_Nch_015 ->fill(Nch_015, pT, weight);
- if (pT > 0.5 && pT < 4.0) _h_pT_Nch_05 ->fill(Nch_05, pT, weight);
+ if (pT < 4.0) _h_pT_Nch_015 ->fill(Nch_015, pT);
+ if (pT > 0.5 && pT < 4.0) _h_pT_Nch_05 ->fill(Nch_05, pT);
// To get the Yield, fill appropriate weight 1/(2PI * pT * d eta)
- _h_pT->fill(pT, weight /(TWOPI*pT*1.6) );
+ _h_pT->fill(pT, 1.0 /(TWOPI*pT*1.6) );
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_pT, 1.0/_Nevt_after_cuts);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_pT;
Profile1DPtr _h_pT_Nch_015 ;
Profile1DPtr _h_pT_Nch_05 ;
- double _Nevt_after_cuts;
+ CounterPtr _Nevt_after_cuts;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2010_S8706239);
}
diff --git a/analyses/pluginALICE/ALICE_2011_S8909580.cc b/analyses/pluginALICE/ALICE_2011_S8909580.cc
--- a/analyses/pluginALICE/ALICE_2011_S8909580.cc
+++ b/analyses/pluginALICE/ALICE_2011_S8909580.cc
@@ -1,103 +1,102 @@
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class ALICE_2011_S8909580 : public Analysis {
public:
ALICE_2011_S8909580()
: Analysis("ALICE_2011_S8909580")
{}
public:
void init() {
const UnstableFinalState ufs(Cuts::abseta < 15);
declare(ufs, "UFS");
book(_histPtK0s ,1, 1, 1);
book(_histPtLambda ,2, 1, 1);
book(_histPtAntiLambda ,3, 1, 1);
book(_histPtXi ,4, 1, 1);
book(_histPtPhi ,5, 1, 1);
book(_temp_h_Lambdas ,"TMP/h_Lambdas", refData(6, 1, 1));
book(_temp_h_Kzeros ,"TMP/h_Kzeros", refData(6, 1, 1));
book(_h_LamKzero ,6, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
foreach (const Particle& p, ufs.particles()) {
const double absrap = p.absrap();
const double pT = p.pT()/GeV;
if (absrap < 0.8) {
switch(p.pid()) {
case 3312:
case -3312:
if ( !( p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) {
- _histPtXi->fill(pT, weight);
+ _histPtXi->fill(pT);
}
break;
if (absrap < 0.75) {
case 310:
- _histPtK0s->fill(pT, weight);
- _temp_h_Kzeros->fill(pT, 2*weight);
+ _histPtK0s->fill(pT);
+ _temp_h_Kzeros->fill(pT, 2);
break;
case 3122:
if ( !( p.hasAncestor(3322) || p.hasAncestor(-3322) ||
p.hasAncestor(3312) || p.hasAncestor(-3312) ||
p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) {
- _histPtLambda->fill(pT, weight);
- _temp_h_Lambdas->fill(pT, weight);
+ _histPtLambda->fill(pT);
+ _temp_h_Lambdas->fill(pT);
}
break;
case -3122:
if ( !( p.hasAncestor(3322) || p.hasAncestor(-3322) ||
p.hasAncestor(3312) || p.hasAncestor(-3312) ||
p.hasAncestor(3334) || p.hasAncestor(-3334) ) ) {
- _histPtAntiLambda->fill(pT, weight);
- _temp_h_Lambdas->fill(pT, weight);
+ _histPtAntiLambda->fill(pT);
+ _temp_h_Lambdas->fill(pT);
}
break;
}
if (absrap<0.6) {
case 333:
- _histPtPhi->fill(pT, weight);
+ _histPtPhi->fill(pT);
break;
}
}
}
}
}
void finalize() {
scale(_histPtK0s, 1./(1.5*sumOfWeights()));
scale(_histPtLambda, 1./(1.5*sumOfWeights()));
scale(_histPtAntiLambda, 1./(1.5*sumOfWeights()));
scale(_histPtXi, 1./(1.6*sumOfWeights()));
scale(_histPtPhi, 1./(1.2*sumOfWeights()));
divide(_temp_h_Lambdas, _temp_h_Kzeros, _h_LamKzero);
}
private:
Histo1DPtr _histPtK0s, _histPtLambda, _histPtAntiLambda, _histPtXi, _histPtPhi;
Histo1DPtr _temp_h_Lambdas, _temp_h_Kzeros;
Scatter2DPtr _h_LamKzero;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2011_S8909580);
}
diff --git a/analyses/pluginALICE/ALICE_2011_S8945144.cc b/analyses/pluginALICE/ALICE_2011_S8945144.cc
--- a/analyses/pluginALICE/ALICE_2011_S8945144.cc
+++ b/analyses/pluginALICE/ALICE_2011_S8945144.cc
@@ -1,105 +1,104 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2011_S8945144 : public Analysis {
public:
ALICE_2011_S8945144()
: Analysis("ALICE_2011_S8945144")
{}
public:
void init() {
const ChargedFinalState cfs(-15, 15);
declare(cfs, "CFS");
book(_histPtPions ,"d01-x01-y01");
book(_histPtAntiPions ,"d01-x01-y02");
book(_histPtKaons ,"d02-x01-y01");
book(_histPtAntiKaons ,"d02-x01-y02");
book(_histPtProtons ,"d03-x01-y01");
book(_histPtAntiProtons ,"d03-x01-y02");
book(_histAveragePt ,"d04-x01-y01");
}
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
if(p.absrap()<0.5) {
switch (p.pid()) {
case 211:
- _histPtPions->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtPions->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -211:
- _histPtAntiPions->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtAntiPions->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case 2212:
if ( !(p.hasAncestor(3322) || // Xi0
p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda
p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/-
p.hasAncestor(3312) || p.hasAncestor(-3312) ) ) { // Xi-/+
- _histPtProtons->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtProtons->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
}
break;
case -2212:
if ( !(p.hasAncestor(3322) || // Xi0
p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda
p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/-
p.hasAncestor(3312) || p.hasAncestor(-3312) ) ) { // Xi-/+
- _histPtAntiProtons->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtAntiProtons->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
}
break;
case 321:
- _histPtKaons->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtKaons->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -321:
- _histPtAntiKaons->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtAntiKaons->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
}
}
}
}
void finalize() {
scale(_histPtPions, 1./sumOfWeights());
scale(_histPtProtons, 1./sumOfWeights());
scale(_histPtKaons, 1./sumOfWeights());
scale(_histPtAntiPions, 1./sumOfWeights());
scale(_histPtAntiProtons, 1./sumOfWeights());
scale(_histPtAntiKaons, 1./sumOfWeights());
}
private:
Histo1DPtr _histPtPions;
Histo1DPtr _histPtProtons;
Histo1DPtr _histPtKaons;
Histo1DPtr _histPtAntiPions;
Histo1DPtr _histPtAntiProtons;
Histo1DPtr _histPtAntiKaons;
Profile1DPtr _histAveragePt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2011_S8945144);
}
diff --git a/analyses/pluginALICE/ALICE_2012_I1181770.cc b/analyses/pluginALICE/ALICE_2012_I1181770.cc
--- a/analyses/pluginALICE/ALICE_2012_I1181770.cc
+++ b/analyses/pluginALICE/ALICE_2012_I1181770.cc
@@ -1,116 +1,114 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2012_I1181770 : public Analysis {
public:
ALICE_2012_I1181770()
: Analysis("ALICE_2012_I1181770")
{ }
void init() {
// Projection setup
declare(ChargedFinalState(), "CFS");
// Book (energy-specific) histograms
int isqrts = -1;
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) isqrts = 1;
else if (fuzzyEquals(sqrtS()/GeV, 2760, 1E-3)) isqrts = 2;
else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) isqrts = 3;
assert(isqrts > 0);
book(_h_frac_sd_inel, 1, 1, isqrts);
book(_h_frac_dd_inel, 2, 1, isqrts);
book(_h_xsec_sd , 3, 1, isqrts);
book(_h_xsec_dd , 4, 1, isqrts);
book(_h_xsec_inel , 5, 1, isqrts);
}
void analyze(const Event& event) {
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent; // need at least two particles to calculate gaps
- const double weight = 1.0;
-
// Fill INEL plots for each event
- _h_xsec_inel->fill(sqrtS()/GeV, weight);
+ _h_xsec_inel->fill(sqrtS()/GeV);
// Identify particles with most positive/most negative rapidities
const Particles particlesByRap = cfs.particles(cmpMomByRap);
const Particle pslowest = particlesByRap.front();
const Particle pfastest = particlesByRap.back();
// Find gap sizes
const Particles particlesByEta = cfs.particles(cmpMomByEta); // sorted from minus to plus
const size_t num_particles = particlesByEta.size();
vector<double> gaps;
for (size_t ip = 1; ip < num_particles; ++ip) {
const Particle& p1 = particlesByEta[ip-1];
const Particle& p2 = particlesByEta[ip];
const double gap = p2.eta() - p1.eta();
assert(gap >= 0);
gaps.push_back(gap);
}
// First, last, and largest gaps
const double gapmax = *max_element(gaps.begin(), gaps.end());
const double gapbwd = gaps.front();
const double gapfwd = gaps.back();
// Mx calculation
FourMomentum p4lead;
if (pslowest.pid() == PID::PROTON && pfastest.pid() == PID::PROTON) {
p4lead = (fabs(pslowest.rapidity()) > fabs(pfastest.rapidity())) ? pslowest.momentum() : pfastest.momentum();
} else if (pslowest.pid() == PID::PROTON) {
p4lead = pslowest.momentum();
} else if (pfastest.pid() == PID::PROTON) {
p4lead = pfastest.momentum();
}
const double Mx = sqrt( (sqrtS()-p4lead.E()-p4lead.p3().mod()) * (sqrtS()-p4lead.E()+p4lead.p3().mod()) );
// Fill SD (and escape) if Mx is sufficiently low
if (Mx < 200*GeV) {
- _h_xsec_sd->fill(sqrtS()/GeV, weight);
+ _h_xsec_sd->fill(sqrtS()/GeV);
return;
}
// Also remove SD-like events in NSD events
if (fuzzyEquals(gapbwd, gapmax) || fuzzyEquals(gapfwd, gapmax)) vetoEvent;
// Fill DD plots
- if (gapmax > 3) _h_xsec_dd->fill(sqrtS()/GeV, weight);
+ if (gapmax > 3) _h_xsec_dd->fill(sqrtS()/GeV);
}
void finalize() {
// get the ratio plots: SD/inel, DD/inel
divide(_h_xsec_sd , _h_xsec_inel, _h_frac_sd_inel);
divide(_h_xsec_dd , _h_xsec_inel, _h_frac_dd_inel);
const double scaling = crossSection()/millibarn/sumOfWeights();
scale(_h_xsec_sd, scaling);
scale(_h_xsec_dd, scaling);
scale(_h_xsec_inel, scaling);
}
private:
Scatter2DPtr _h_frac_sd_inel;
Scatter2DPtr _h_frac_dd_inel;
Histo1DPtr _h_xsec_sd;
Histo1DPtr _h_xsec_dd;
Histo1DPtr _h_xsec_inel;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2012_I1181770);
}
diff --git a/analyses/pluginALICE/ALICE_2014_I1300380.cc b/analyses/pluginALICE/ALICE_2014_I1300380.cc
--- a/analyses/pluginALICE/ALICE_2014_I1300380.cc
+++ b/analyses/pluginALICE/ALICE_2014_I1300380.cc
@@ -1,120 +1,119 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class ALICE_2014_I1300380 : public Analysis {
public:
ALICE_2014_I1300380()
: Analysis("ALICE_2014_I1300380")
{}
public:
void init() {
const UnstableFinalState cfs(Cuts::absrap<0.5);
declare(cfs, "CFS");
// Plots from the paper
book(_histPtSigmaStarPlus ,"d01-x01-y01"); // Sigma*+
book(_histPtSigmaStarMinus ,"d01-x01-y02"); // Sigma*-
book(_histPtSigmaStarPlusAnti ,"d01-x01-y03"); // anti Sigma*-
book(_histPtSigmaStarMinusAnti ,"d01-x01-y04"); // anti Sigma*+
book(_histPtXiStar ,"d02-x01-y01"); // 0.5 * (xi star + anti xi star)
book(_histAveragePt ,"d03-x01-y01"); // <pT> profile
}
void analyze(const Event& event) {
- const double weight = 1.0;
const UnstableFinalState& cfs = apply<UnstableFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
// protections against mc generators decaying long-lived particles
if ( !(p.hasAncestor(310) || p.hasAncestor(-310) || // K0s
p.hasAncestor(130) || p.hasAncestor(-130) || // K0l
p.hasAncestor(3322) || p.hasAncestor(-3322) || // Xi0
p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda
p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/-
p.hasAncestor(3312) || p.hasAncestor(-3312) || // Xi-/+
p.hasAncestor(3334) || p.hasAncestor(-3334) )) // Omega-/+
{
int aid = abs(p.pdgId());
if (aid == 211 || // pi+
aid == 321 || // K+
aid == 313 || // K*(892)0
aid == 2212 || // proton
aid == 333 ) { // phi(1020)
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
}
} // end if "rejection of long-lived particles"
switch (p.pdgId()) {
case 3224:
- _histPtSigmaStarPlus->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtSigmaStarPlus->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -3224:
- _histPtSigmaStarPlusAnti->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtSigmaStarPlusAnti->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case 3114:
- _histPtSigmaStarMinus->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtSigmaStarMinus->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -3114:
- _histPtSigmaStarMinusAnti->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtSigmaStarMinusAnti->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case 3324:
- _histPtXiStar->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtXiStar->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -3324:
- _histPtXiStar->fill(p.pT()/GeV, weight);
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histPtXiStar->fill(p.pT()/GeV);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case 3312:
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -3312:
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case 3334:
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
case -3334:
- _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV, weight);
+ _histAveragePt->fill(p.mass()/GeV, p.pT()/GeV);
break;
}
}
}
void finalize() {
scale(_histPtSigmaStarPlus, 1./sumOfWeights());
scale(_histPtSigmaStarPlusAnti, 1./sumOfWeights());
scale(_histPtSigmaStarMinus, 1./sumOfWeights());
scale(_histPtSigmaStarMinusAnti, 1./sumOfWeights());
scale(_histPtXiStar, 1./sumOfWeights()/ 2.);
}
private:
// plots from the paper
Histo1DPtr _histPtSigmaStarPlus;
Histo1DPtr _histPtSigmaStarPlusAnti;
Histo1DPtr _histPtSigmaStarMinus;
Histo1DPtr _histPtSigmaStarMinusAnti;
Histo1DPtr _histPtXiStar;
Profile1DPtr _histAveragePt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2014_I1300380);
}
diff --git a/analyses/pluginALICE/ALICE_2015_I1357424.cc b/analyses/pluginALICE/ALICE_2015_I1357424.cc
--- a/analyses/pluginALICE/ALICE_2015_I1357424.cc
+++ b/analyses/pluginALICE/ALICE_2015_I1357424.cc
@@ -1,99 +1,98 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2015_I1357424 : public Analysis {
public:
ALICE_2015_I1357424()
: Analysis("ALICE_2015_I1357424")
{}
public:
void init() {
const ChargedFinalState cfs(Cuts::absrap<0.5);
declare(cfs, "CFS");
//
// plots from the paper
book(_histPtPions ,"d01-x01-y01"); // pions
book(_histPtKaons ,"d01-x01-y02"); // kaons
book(_histPtProtons ,"d01-x01-y03"); // protons
book(_histPtKtoPi ,"d02-x01-y01"); // K to pi ratio
book(_histPtPtoPi ,"d03-x01-y01"); // p to pi ratio
//
// temp histos for ratios
book(_histPtPionsR1 ,"TMP/pT_pi1", refData(2, 1, 1)); // pi histo compatible with more restricted kaon binning
book(_histPtPionsR2 ,"TMP/pT_pi2", refData(3, 1, 1)); // pi histo compatible with more restricted proton binning
book(_histPtKaonsR ,"TMP/pT_K", refData(2, 1, 1)); // K histo with more restricted binning
book(_histPtProtonsR ,"TMP/pT_p", refData(3, 1, 1)); // p histo with more restricted binning
}
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
// protections against mc generators decaying long-lived particles
if ( !(p.hasAncestor(310) || p.hasAncestor(-310) || // K0s
p.hasAncestor(130) || p.hasAncestor(-130) || // K0l
p.hasAncestor(3322) || p.hasAncestor(-3322) || // Xi0
p.hasAncestor(3122) || p.hasAncestor(-3122) || // Lambda
p.hasAncestor(3222) || p.hasAncestor(-3222) || // Sigma+/-
p.hasAncestor(3312) || p.hasAncestor(-3312) || // Xi-/+
p.hasAncestor(3334) || p.hasAncestor(-3334) )) // Omega-/+
{
switch (abs(p.pid())) {
case 211: // pi+
- _histPtPions->fill(p.pT()/GeV, weight);
- _histPtPionsR1->fill(p.pT()/GeV, weight);
- _histPtPionsR2->fill(p.pT()/GeV, weight);
+ _histPtPions->fill(p.pT()/GeV);
+ _histPtPionsR1->fill(p.pT()/GeV);
+ _histPtPionsR2->fill(p.pT()/GeV);
break;
case 2212: // proton
- _histPtProtons->fill(p.pT()/GeV, weight);
- _histPtProtonsR->fill(p.pT()/GeV, weight);
+ _histPtProtons->fill(p.pT()/GeV);
+ _histPtProtonsR->fill(p.pT()/GeV);
break;
case 321: // K+
- _histPtKaons->fill(p.pT()/GeV, weight);
- _histPtKaonsR->fill(p.pT()/GeV, weight);
+ _histPtKaons->fill(p.pT()/GeV);
+ _histPtKaonsR->fill(p.pT()/GeV);
break;
} // particle switch
} // primary pi, K, p only
} // particle loop
}
void finalize() {
divide(_histPtKaonsR, _histPtPionsR1, _histPtKtoPi);
divide(_histPtProtonsR, _histPtPionsR2, _histPtPtoPi);
scale(_histPtPions, 1./sumOfWeights());
scale(_histPtProtons, 1./sumOfWeights());
scale(_histPtKaons, 1./sumOfWeights());
}
private:
Histo1DPtr _histPtPions;
Histo1DPtr _histPtProtons;
Histo1DPtr _histPtKaons;
Histo1DPtr _histPtPionsR1;
Histo1DPtr _histPtPionsR2;
Histo1DPtr _histPtProtonsR;
Histo1DPtr _histPtKaonsR;
Scatter2DPtr _histPtKtoPi;
Scatter2DPtr _histPtPtoPi;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2015_I1357424);
}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1124167.cc b/analyses/pluginATLAS/ATLAS_2012_I1124167..cc.needstransversesphericity
rename from analyses/pluginATLAS/ATLAS_2012_I1124167.cc
rename to analyses/pluginATLAS/ATLAS_2012_I1124167..cc.needstransversesphericity
diff --git a/analyses/pluginATLAS/ATLAS_2014_I1279489.cc b/analyses/pluginATLAS/ATLAS_2014_I1279489.cc.getscatter2d
rename from analyses/pluginATLAS/ATLAS_2014_I1279489.cc
rename to analyses/pluginATLAS/ATLAS_2014_I1279489.cc.getscatter2d
diff --git a/analyses/pluginCDF/CDF_1988_S1865951.cc b/analyses/pluginCDF/CDF_1988_S1865951.cc
--- a/analyses/pluginCDF/CDF_1988_S1865951.cc
+++ b/analyses/pluginCDF/CDF_1988_S1865951.cc
@@ -1,85 +1,85 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
namespace Rivet {
/// @brief CDF track \f$ p_\perp \f$ distributions at 630 and 1800 GeV
class CDF_1988_S1865951 : public Analysis {
public:
/// Constructor
CDF_1988_S1865951()
: Analysis("CDF_1988_S1865951")
- {
- _sumWTrig = 0;
- }
+ {}
/// @name Analysis methods
//@{
/// Book histograms and set up projections
void init() {
// Set up projections
declare(TriggerCDFRun0Run1(), "Trigger");
const ChargedFinalState cfs(-1.0, 1.0, 0.4*GeV);
declare(cfs, "CFS");
// Book histo
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
book(_hist_pt ,1, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) {
book(_hist_pt ,2, 1, 1);
}
+
+ book(_sumWTrig, "sumWTrig");
+
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
const FinalState& trackfs = apply<ChargedFinalState>(event, "CFS");
foreach (Particle p, trackfs.particles()) {
const double pt = p.pT()/GeV;
// Effective weight for d3sig/dp3 = weight / ( Delta eta * 2pi * pt ), with Delta(eta) = 2
- const double eff_weight = weight/(2*2*TWOPI*pt);
+ const double eff_weight = 1.0/(2*2*TWOPI*pt);
_hist_pt->fill(pt, eff_weight);
}
}
/// Scale histos
void finalize() {
scale(_hist_pt, crossSectionPerEvent()/millibarn);
}
//@}
private:
/// @name Counters
//@{
- double _sumWTrig;
+ CounterPtr _sumWTrig;
//@}
/// @name Histos
//@{
Histo1DPtr _hist_pt;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1988_S1865951);
}
diff --git a/analyses/pluginCDF/CDF_1990_S2089246.cc b/analyses/pluginCDF/CDF_1990_S2089246.cc
--- a/analyses/pluginCDF/CDF_1990_S2089246.cc
+++ b/analyses/pluginCDF/CDF_1990_S2089246.cc
@@ -1,85 +1,84 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
namespace Rivet {
/// @brief CDF pseudorapidity analysis at 630 and 1800 GeV
/// @author Andy Buckley
class CDF_1990_S2089246 : public Analysis {
public:
/// Constructor
CDF_1990_S2089246()
: Analysis("CDF_1990_S2089246")
{
- _sumWTrig = 0;
}
/// @name Analysis methods
//@{
void init() {
// Setup projections
declare(TriggerCDFRun0Run1(), "Trigger");
declare(ChargedFinalState(-3.5, 3.5), "CFS");
// Book histo
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
book(_hist_eta ,3, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) {
book(_hist_eta ,4, 1, 1);
}
+ book(_sumWTrig, "sumWTrig");
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
// Loop over final state charged particles to fill eta histos
const FinalState& fs = apply<FinalState>(event, "CFS");
foreach (const Particle& p, fs.particles()) {
const double eta = p.eta();
- _hist_eta->fill(fabs(eta), weight);
+ _hist_eta->fill(fabs(eta));
}
}
/// Finalize
void finalize() {
// Divide through by num events to get d<N>/d(eta) in bins
// Factor of 1/2 for |eta| -> eta
scale(_hist_eta, 0.5/_sumWTrig);
}
//@}
private:
/// @name Weight counter
//@{
- double _sumWTrig;
+ CounterPtr _sumWTrig;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_eta;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1990_S2089246);
}
diff --git a/analyses/pluginCDF/CDF_1993_S2742446.cc b/analyses/pluginCDF/CDF_1993_S2742446.cc
--- a/analyses/pluginCDF/CDF_1993_S2742446.cc
+++ b/analyses/pluginCDF/CDF_1993_S2742446.cc
@@ -1,110 +1,108 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
namespace Rivet {
/// @brief CDF <what is this analysis doing?>
class CDF_1993_S2742446 : public Analysis {
public:
CDF_1993_S2742446()
: Analysis("CDF_1993_S2742446")
{ }
public:
void init() {
// The photon selection has been corrected to pTmin=22 GeV (vs. 23 in the trigger)
LeadingParticlesFinalState photonfs(FinalState(-0.9, 0.9, 22.0*GeV));
photonfs.addParticleId(PID::PHOTON);
declare(photonfs, "LeadingPhoton");
// FS excluding the leading photon
VetoedFinalState vfs(FinalState(-4.2, 4.2));
vfs.addVetoOnThisFinalState(photonfs);
declare(vfs, "VFS");
// Jets
declare(FastJets(vfs, FastJets::CDFJETCLU, 0.7), "Jets");
book(_h_costheta ,1, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
Particles photons = apply<LeadingParticlesFinalState>(event, "LeadingPhoton").particles();
if (photons.size()!=1 || photons[0].pT()>45.0*GeV) {
vetoEvent;
}
FourMomentum leadingPhoton = photons[0].momentum();
double eta_P = leadingPhoton.eta();
double phi_P = leadingPhoton.phi();
// photon isolation: less than 2 GeV EM E_T
double Etsum=0.0;
foreach (const Particle& p, apply<VetoedFinalState>(event, "VFS").particles()) {
if (p.charge() != 0 && deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.7) Etsum += p.Et();
}
if (Etsum > 2*GeV) vetoEvent;
FourMomentum jetsum;
Jets jets = apply<FastJets>(event, "Jets").jets(Cuts::pT > 10*GeV, cmpMomByPt);
// Require at least one jet with pT>10 GeV
if (jets.size()==0) vetoEvent;
// Require the leading jet to be in the opposite (phi) hemisphere w.r.t. the photon
if (jets[0].phi() - phi_P <= M_PI) vetoEvent;
// sum all jets in the opposite hemisphere in phi from the photon
foreach (const Jet& jet, jets) {
if (fabs(jet.phi()-phi_P) > M_PI) jetsum+=jet.momentum();
}
// c.m. cuts, see Table 1
double etaboost = (jetsum.eta()+eta_P)/2.;
if (!inRange(etaboost, -1.2, 1.2)) vetoEvent;
double etastar = (jetsum.eta()-eta_P)/2.;
if (!inRange(etastar, -1.1, 1.1)) vetoEvent;
double pstar = photons[0].pT()*cosh(etastar);
if (!inRange(pstar, 27.8, 47.0)) vetoEvent;
const double costheta = fabs(tanh((eta_P-jetsum.eta())/2.0));
if (!inRange(costheta, 0., 0.8)) vetoEvent;
// Fill histo
- _h_costheta->fill(costheta, weight);
+ _h_costheta->fill(costheta);
}
void finalize() {
/// @todo Take fixed norm direct from ref histo
normalize(_h_costheta, 1.4271); // fixed norm ok
}
private:
Histo1DPtr _h_costheta;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1993_S2742446);
}
diff --git a/analyses/pluginCDF/CDF_1996_S3108457.cc b/analyses/pluginCDF/CDF_1996_S3108457.cc
--- a/analyses/pluginCDF/CDF_1996_S3108457.cc
+++ b/analyses/pluginCDF/CDF_1996_S3108457.cc
@@ -1,123 +1,121 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/SmearedJets.hh"
namespace Rivet {
/// @brief CDF properties of high-mass multi-jet events
class CDF_1996_S3108457 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_1996_S3108457()
: Analysis("CDF_1996_S3108457")
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
/// Initialise and register projections here
const FinalState fs(-4.2, 4.2);
FastJets fj(fs, FastJets::CDFJETCLU, 0.7);
declare(fj, "Jets");
// Smear Energy and mass with the 10% uncertainty quoted in the paper
SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); });
declare(sj_E, "SmearedJets_E");
/// Book histograms here, e.g.:
for (size_t i=0; i<5; ++i) {
book(_h_m[i] ,1+i, 1, 1);
book(_h_costheta[i] ,10+i, 1, 1);
book(_h_pT[i] ,15+i, 1, 1);
}
/// @todo Ratios of mass histograms left out: Binning doesn't work out
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
// Get the smeared jets
Jets SJets = apply<JetAlg>(event, "SmearedJets_E").jets(Cuts::Et > 20.0*GeV, cmpMomByEt);
if (SJets.size() < 2 || SJets.size() > 6) vetoEvent;
// Calculate Et, total jet 4 Momentum
double sumEt(0), sumE(0);
FourMomentum JS(0,0,0,0);
foreach(const Jet& jet, SJets) {
sumEt += jet.Et()*GeV;
sumE += jet.E()*GeV;
JS+=jet.momentum();
}
if (sumEt < 420*GeV || sumE > 2000*GeV) vetoEvent;
double mass = JS.mass();
LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(JS.betaVec());
FourMomentum jet0boosted(cms_boost.transform(SJets[0].momentum()));
double costheta0 = fabs(cos(jet0boosted.theta()));
if (costheta0 < 2.0/3.0) {
- _h_m[SJets.size()-2]->fill(mass, weight);
+ _h_m[SJets.size()-2]->fill(mass);
}
- if (mass > 600.0*GeV) _h_costheta[JS.size()-2]->fill(costheta0, weight);
+ if (mass > 600.0*GeV) _h_costheta[SJets.size()-2]->fill(costheta0);
if (costheta0 < 2.0/3.0 && mass > 600.0*GeV) {
foreach (const Jet& jet, SJets) {
- _h_pT[SJets.size()-2]->fill(jet.pT(), weight);
+ _h_pT[SJets.size()-2]->fill(jet.pT());
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
/// Normalise, scale and otherwise manipulate histograms here
for (size_t i=0; i<5; ++i) {
normalize(_h_m[i], 40.0);
normalize(_h_costheta[i], 2.0);
normalize(_h_pT[i], 20.0);
}
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_m[5];
Histo1DPtr _h_costheta[5];
Histo1DPtr _h_pT[5];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1996_S3108457);
}
diff --git a/analyses/pluginCDF/CDF_1996_S3349578.cc b/analyses/pluginCDF/CDF_1996_S3349578.cc
--- a/analyses/pluginCDF/CDF_1996_S3349578.cc
+++ b/analyses/pluginCDF/CDF_1996_S3349578.cc
@@ -1,456 +1,455 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/SmearedJets.hh"
namespace Rivet {
/// @brief CDF properties of high-mass multi-jet events
class CDF_1996_S3349578 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_1996_S3349578()
: Analysis("CDF_1996_S3349578")
{
}
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
/// Initialise and register projections here
const FinalState fs(-4.2, 4.2);
FastJets fj(fs, FastJets::CDFJETCLU, 0.7);
declare(fj, "Jets");
// Smear Energy and mass with the 10% uncertainty quoted in the paper
SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); });
declare(sj_E, "SmearedJets");
/// Book histograms here, e.g.:
book(_h_3_mNJ ,1, 1, 1);
book(_h_3_X3 ,2, 1, 1);
book(_h_3_X4 ,3, 1, 1);
book(_h_3_costheta3 ,8, 1, 1);
book(_h_3_psi3 ,9, 1, 1);
book(_h_3_f3 ,14, 1, 1);
book(_h_3_f4 ,14, 1, 2);
book(_h_3_f5 ,14, 1, 3);
book(_h_4_mNJ ,1, 1, 2);
book(_h_4_X3 ,4, 1, 1);
book(_h_4_X4 ,5, 1, 1);
book(_h_4_costheta3 ,10, 1, 1);
book(_h_4_psi3 ,11, 1, 1);
book(_h_4_f3 ,15, 1, 1);
book(_h_4_f4 ,15, 1, 2);
book(_h_4_f5 ,15, 1, 3);
book(_h_4_XA ,17, 1, 1);
book(_h_4_psiAB ,19, 1, 1);
book(_h_4_fA ,21, 1, 1);
book(_h_4_fB ,21, 1, 2);
book(_h_5_mNJ ,1, 1, 3);
book(_h_5_X3 ,6, 1, 1);
book(_h_5_X4 ,7, 1, 1);
book(_h_5_costheta3 ,12, 1, 1);
book(_h_5_psi3 ,13, 1, 1);
book(_h_5_f3 ,16, 1, 1);
book(_h_5_f4 ,16, 1, 2);
book(_h_5_f5 ,16, 1, 3);
book(_h_5_XA ,18, 1, 1);
book(_h_5_XC ,18, 1, 2);
book(_h_5_psiAB ,20, 1, 1);
book(_h_5_psiCD ,20, 1, 2);
book(_h_5_fA ,22, 1, 1);
book(_h_5_fB ,23, 1, 1);
book(_h_5_fC ,24, 1, 1);
book(_h_5_fD ,25, 1, 1);
}
void analyze(const Event& event) {
Jets jets;
FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0);
foreach (const Jet& jet, apply<JetAlg>(event, "SmearedJets").jets(Cuts::Et > 20.0*GeV, cmpMomByEt)) {
bool separated = true;
foreach (const Jet& ref, jets) {
if (deltaR(jet, ref) < 0.9) {
separated = false;
break;
}
}
if (!separated) continue;
jets.push_back(jet);
jetsystem += jet.momentum();
if (jets.size() >= 5) break;
}
- const double weight = 1.0;
if (jets.size() > 4) {
- _fiveJetAnalysis(jets, weight);
+ _fiveJetAnalysis(jets);
jets.resize(4);
}
if (jets.size() > 3) {
- _fourJetAnalysis(jets, weight);
+ _fourJetAnalysis(jets);
jets.resize(3);
}
if (jets.size() > 2) {
- _threeJetAnalysis(jets, weight);
+ _threeJetAnalysis(jets);
}
}
- void _threeJetAnalysis(const Jets& jets, const double& weight) {
+ void _threeJetAnalysis(const Jets& jets) {
MSG_DEBUG("3 jet analysis");
double sumEt = 0.0;
FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0);
foreach (const Jet& jet, jets) {
sumEt += jet.Et();
jetsystem += jet.momentum();
}
if (sumEt < 420.0*GeV) return;
const double m3J = _safeMass(jetsystem);
if (m3J < 600*GeV) return;
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec());
vector<FourMomentum> jets3;
foreach (Jet jet, jets) {
jets3.push_back(cms_boost.transform(jet.momentum()));
}
std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending());
FourMomentum p3(jets3[0]), p4(jets3[1]), p5(jets3[2]);
FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m3J, jetsystem.rapidity()));
double costheta3 = pAV.p3().unit().dot(p3.p3().unit());
if (fabs(costheta3) > 0.6) return;
double X3 = 2.0*p3.E()/m3J;
if (X3 > 0.9) return;
const double X4 = 2.0*p4.E()/m3J;
const double psi3 = _psi(p3, pAV, p4, p5);
const double f3 = _safeMass(p3)/m3J;
const double f4 = _safeMass(p4)/m3J;
const double f5 = _safeMass(p5)/m3J;
- _h_3_mNJ->fill(m3J, weight);
- _h_3_X3->fill(X3, weight);
- _h_3_X4->fill(X4, weight);
- _h_3_costheta3->fill(costheta3, weight);
- _h_3_psi3->fill(psi3, weight);
- _h_3_f3->fill(f3, weight);
- _h_3_f4->fill(f4, weight);
- _h_3_f5->fill(f5, weight);
+ _h_3_mNJ->fill(m3J);
+ _h_3_X3->fill(X3);
+ _h_3_X4->fill(X4);
+ _h_3_costheta3->fill(costheta3);
+ _h_3_psi3->fill(psi3);
+ _h_3_f3->fill(f3);
+ _h_3_f4->fill(f4);
+ _h_3_f5->fill(f5);
}
- void _fourJetAnalysis(const Jets& jets, const double& weight) {
+ void _fourJetAnalysis(const Jets& jets) {
MSG_DEBUG("4 jet analysis");
double sumEt=0.0;
FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0);
foreach (const Jet& jet, jets) {
sumEt+=jet.Et();
jetsystem+=jet.momentum();
}
if (sumEt < 420.0*GeV) return;
const double m4J = _safeMass(jetsystem);
if (m4J < 650*GeV) return;
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec());
vector<FourMomentum> jets4;
foreach (Jet jet, jets) {
jets4.push_back(cms_boost.transform(jet.momentum()));
}
std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending());
FourMomentum pA, pB;
vector<FourMomentum> jets3(_reduce(jets4, pA, pB));
std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending());
FourMomentum p3(jets3[0]);
FourMomentum p4(jets3[1]);
FourMomentum p5(jets3[2]);
FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m4J, jetsystem.rapidity()));
double costheta3=pAV.p3().unit().dot(p3.p3().unit());
if (fabs(costheta3)>0.8) {
return;
}
const double X3 = 2.0*p3.E()/m4J;
if (X3>0.9) {
return;
}
// fill histograms
const double X4 = 2.0*p4.E()/m4J;
const double psi3 = _psi(p3, pAV, p4, p5);
const double f3 = _safeMass(p3)/m4J;
const double f4 = _safeMass(p4)/m4J;
const double f5 = _safeMass(p5)/m4J;
const double fA = _safeMass(pA)/m4J;
const double fB = _safeMass(pB)/m4J;
const double XA = pA.E()/(pA.E()+pB.E());
const double psiAB = _psi(pA, pB, pA+pB, pAV);
- _h_4_mNJ->fill(m4J, weight);
- _h_4_X3->fill(X3, weight);
- _h_4_X4->fill(X4, weight);
- _h_4_costheta3->fill(costheta3, weight);
- _h_4_psi3->fill(psi3, weight);
- _h_4_f3->fill(f3, weight);
- _h_4_f4->fill(f4, weight);
- _h_4_f5->fill(f5, weight);
- _h_4_XA->fill(XA, weight);
- _h_4_psiAB->fill(psiAB, weight);
- _h_4_fA->fill(fA, weight);
- _h_4_fB->fill(fB, weight);
+ _h_4_mNJ->fill(m4J);
+ _h_4_X3->fill(X3);
+ _h_4_X4->fill(X4);
+ _h_4_costheta3->fill(costheta3);
+ _h_4_psi3->fill(psi3);
+ _h_4_f3->fill(f3);
+ _h_4_f4->fill(f4);
+ _h_4_f5->fill(f5);
+ _h_4_XA->fill(XA);
+ _h_4_psiAB->fill(psiAB);
+ _h_4_fA->fill(fA);
+ _h_4_fB->fill(fB);
}
- void _fiveJetAnalysis(const Jets& jets, const double& weight) {
+ void _fiveJetAnalysis(const Jets& jets) {
MSG_DEBUG("5 jet analysis");
double sumEt=0.0;
FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0);
foreach (const Jet& jet, jets) {
sumEt+=jet.Et();
jetsystem+=jet.momentum();
}
if (sumEt < 420.0*GeV) return;
const double m5J = _safeMass(jetsystem);
if (m5J < 750*GeV) return;
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec());
vector<FourMomentum> jets5;
foreach (Jet jet, jets) {
jets5.push_back(cms_boost.transform(jet.momentum()));
}
std::sort(jets5.begin(), jets5.end(), FourMomentum::byEDescending());
FourMomentum pC, pD;
vector<FourMomentum> jets4(_reduce(jets5, pC, pD));
std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending());
FourMomentum pA, pB;
vector<FourMomentum> jets3(_reduce(jets4, pA, pB));
std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending());
FourMomentum p3(jets3[0]);
FourMomentum p4(jets3[1]);
FourMomentum p5(jets3[2]);
// fill histograms
FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m5J, jetsystem.rapidity()));
const double costheta3 = pAV.p3().unit().dot(p3.p3().unit());
const double X3 = 2.0*p3.E()/m5J;
const double X4 = 2.0*p4.E()/m5J;
const double psi3 = _psi(p3, pAV, p4, p5);
const double f3 = _safeMass(p3)/m5J;
const double f4 = _safeMass(p4)/m5J;
const double f5 = _safeMass(p5)/m5J;
const double fA = _safeMass(pA)/m5J;
const double fB = _safeMass(pB)/m5J;
const double XA = pA.E()/(pA.E()+pB.E());
const double psiAB = _psi(pA, pB, pA+pB, pAV);
const double fC = _safeMass(pC)/m5J;
const double fD = _safeMass(pD)/m5J;
const double XC = pC.E()/(pC.E()+pD.E());
const double psiCD = _psi(pC, pD, pC+pD, pAV);
- _h_5_mNJ->fill(m5J, weight);
- _h_5_X3->fill(X3, weight);
- _h_5_X4->fill(X4, weight);
- _h_5_costheta3->fill(costheta3, weight);
- _h_5_psi3->fill(psi3, weight);
- _h_5_f3->fill(f3, weight);
- _h_5_f4->fill(f4, weight);
- _h_5_f5->fill(f5, weight);
- _h_5_XA->fill(XA, weight);
- _h_5_psiAB->fill(psiAB, weight);
- _h_5_fA->fill(fA, weight);
- _h_5_fB->fill(fB, weight);
- _h_5_XC->fill(XC, weight);
- _h_5_psiCD->fill(psiCD, weight);
- _h_5_fC->fill(fC, weight);
- _h_5_fD->fill(fD, weight);
+ _h_5_mNJ->fill(m5J);
+ _h_5_X3->fill(X3);
+ _h_5_X4->fill(X4);
+ _h_5_costheta3->fill(costheta3);
+ _h_5_psi3->fill(psi3);
+ _h_5_f3->fill(f3);
+ _h_5_f4->fill(f4);
+ _h_5_f5->fill(f5);
+ _h_5_XA->fill(XA);
+ _h_5_psiAB->fill(psiAB);
+ _h_5_fA->fill(fA);
+ _h_5_fB->fill(fB);
+ _h_5_XC->fill(XC);
+ _h_5_psiCD->fill(psiCD);
+ _h_5_fC->fill(fC);
+ _h_5_fD->fill(fD);
}
/// Normalise histograms etc., after the run
void finalize() {
/// Normalise, scale and otherwise manipulate histograms here
normalize(_h_3_mNJ, 1.0);
normalize(_h_3_X3, 1.0);
normalize(_h_3_X4, 1.0);
normalize(_h_3_costheta3, 1.0);
normalize(_h_3_psi3, 1.0);
normalize(_h_3_f3, 1.0);
normalize(_h_3_f4, 1.0);
normalize(_h_3_f5, 1.0);
normalize(_h_4_mNJ, 1.0);
normalize(_h_4_X3, 1.0);
normalize(_h_4_X4, 1.0);
normalize(_h_4_costheta3, 1.0);
normalize(_h_4_psi3, 1.0);
normalize(_h_4_f3, 1.0);
normalize(_h_4_f4, 1.0);
normalize(_h_4_f5, 1.0);
normalize(_h_4_XA, 1.0);
normalize(_h_4_psiAB, 1.0);
normalize(_h_4_fA, 1.0);
normalize(_h_4_fB, 1.0);
normalize(_h_5_mNJ, 1.0);
normalize(_h_5_X3, 1.0);
normalize(_h_5_X4, 1.0);
normalize(_h_5_costheta3, 1.0);
normalize(_h_5_psi3, 1.0);
normalize(_h_5_f3, 1.0);
normalize(_h_5_f4, 1.0);
normalize(_h_5_f5, 1.0);
normalize(_h_5_XA, 1.0);
normalize(_h_5_XC, 1.0);
normalize(_h_5_psiAB, 1.0);
normalize(_h_5_psiCD, 1.0);
normalize(_h_5_fA, 1.0);
normalize(_h_5_fB, 1.0);
normalize(_h_5_fC, 1.0);
normalize(_h_5_fD, 1.0);
}
//@}
private:
vector<FourMomentum> _reduce(const vector<FourMomentum>& jets,
FourMomentum& combined1,
FourMomentum& combined2) {
double minMass2 = 1e9;
size_t idx1(jets.size()), idx2(jets.size());
for (size_t i=0; i<jets.size(); ++i) {
for (size_t j=i+1; j<jets.size(); ++j) {
double mass2 = FourMomentum(jets[i]+jets[j]).mass2();
if (mass2<minMass2) {
idx1=i;
idx2=j;
}
}
}
vector<FourMomentum> newjets;
for (size_t i=0; i<jets.size(); ++i) {
if (i!=idx1 && i!=idx2) newjets.push_back(jets[i]);
}
newjets.push_back(jets[idx1]+jets[idx2]);
combined1 = jets[idx1];
combined2 = jets[idx2];
return newjets;
}
FourMomentum _avg_beam_in_lab(const double& m, const double& y) {
const double mt = m/2.0;
FourMomentum beam1(mt, 0, 0, mt);
FourMomentum beam2(mt, 0, 0, -mt);
if (fabs(y)>1e-3) {
FourMomentum boostvec(cosh(y), 0.0, 0.0, sinh(y));
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(boostvec.betaVec()).inverse();
beam1 = cms_boost.transform(beam1);
beam2 = cms_boost.transform(beam2);
}
return (beam1.E() > beam2.E()) ? beam1-beam2 : beam2-beam1;
}
double _psi(const FourMomentum& p1, const FourMomentum& p2,
const FourMomentum& p3, const FourMomentum& p4) {
Vector3 p1xp2 = p1.p3().cross(p2.p3());
Vector3 p3xp4 = p3.p3().cross(p4.p3());
return mapAngle0ToPi(acos(p1xp2.unit().dot(p3xp4.unit())));
}
double _safeMass(const FourMomentum& p) {
double mass2=p.mass2();
if (mass2>0.0) return sqrt(mass2);
else if (mass2<-1.0e-5) {
MSG_WARNING("m2 = " << mass2 << ". Assuming m2=0.");
return 0.0;
}
else return 0.0;
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_3_mNJ;
Histo1DPtr _h_3_X3;
Histo1DPtr _h_3_X4;
Histo1DPtr _h_3_costheta3;
Histo1DPtr _h_3_psi3;
Histo1DPtr _h_3_f3;
Histo1DPtr _h_3_f4;
Histo1DPtr _h_3_f5;
Histo1DPtr _h_4_mNJ;
Histo1DPtr _h_4_X3;
Histo1DPtr _h_4_X4;
Histo1DPtr _h_4_costheta3;
Histo1DPtr _h_4_psi3;
Histo1DPtr _h_4_f3;
Histo1DPtr _h_4_f4;
Histo1DPtr _h_4_f5;
Histo1DPtr _h_4_XA;
Histo1DPtr _h_4_psiAB;
Histo1DPtr _h_4_fA;
Histo1DPtr _h_4_fB;
Histo1DPtr _h_5_mNJ;
Histo1DPtr _h_5_X3;
Histo1DPtr _h_5_X4;
Histo1DPtr _h_5_costheta3;
Histo1DPtr _h_5_psi3;
Histo1DPtr _h_5_f3;
Histo1DPtr _h_5_f4;
Histo1DPtr _h_5_f5;
Histo1DPtr _h_5_XA;
Histo1DPtr _h_5_XC;
Histo1DPtr _h_5_psiAB;
Histo1DPtr _h_5_psiCD;
Histo1DPtr _h_5_fA;
Histo1DPtr _h_5_fB;
Histo1DPtr _h_5_fC;
Histo1DPtr _h_5_fD;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1996_S3349578);
}
diff --git a/analyses/pluginCDF/CDF_1997_S3541940.cc b/analyses/pluginCDF/CDF_1997_S3541940.cc
--- a/analyses/pluginCDF/CDF_1997_S3541940.cc
+++ b/analyses/pluginCDF/CDF_1997_S3541940.cc
@@ -1,245 +1,243 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/SmearedJets.hh"
namespace Rivet {
/// @brief CDF properties of 6-jet events with large 6-jet mass
class CDF_1997_S3541940 : public Analysis {
public:
CDF_1997_S3541940()
: Analysis("CDF_1997_S3541940")
{
}
public:
void init() {
const FinalState fs(-4.2, 4.2);
FastJets fj (fs, FastJets::CDFJETCLU, 0.7);
declare(fj, "Jets");
// Smear Energy and mass with the 10% uncertainty quoted in the paper
SmearedJets sj_E(fj, [](const Jet& jet){ return P4_SMEAR_MASS_GAUSS(P4_SMEAR_E_GAUSS(jet, 0.1*jet.E()), 0.1*jet.mass()); });
declare(sj_E, "SmearedJets");
book(_h_m6J ,1, 1, 1);
book(_h_X3ppp ,2, 1, 1);
book(_h_X4ppp ,3, 1, 1);
book(_h_costheta3ppp ,4, 1, 1);
book(_h_psi3ppp ,5, 1, 1);
book(_h_f3ppp ,6, 1, 1);
book(_h_f4ppp ,6, 1, 2);
book(_h_f5ppp ,6, 1, 3);
book(_h_XApp ,7, 1, 1);
book(_h_XCp ,8, 1, 1);
book(_h_XE ,9, 1, 1);
book(_h_psiAppBpp ,10, 1, 1);
book(_h_psiCpDp ,11, 1, 1);
book(_h_psiEF ,12, 1, 1);
book(_h_fApp ,13, 1, 1);
book(_h_fBpp ,14, 1, 1);
book(_h_fCp ,15, 1, 1);
book(_h_fDp ,16, 1, 1);
book(_h_fE ,17, 1, 1);
book(_h_fF ,18, 1, 1);
}
void analyze(const Event& event) {
Jets jets;
double sumEt = 0.0;
FourMomentum jetsystem(0.0, 0.0, 0.0, 0.0);
foreach (const Jet& jet, apply<JetAlg>(event, "SmearedJets").jets(Cuts::Et>20*GeV && Cuts::abseta<3,cmpMomByEt)) {
double Et = jet.Et();
bool separated = true;
foreach (const Jet& ref, jets) {
if (deltaR(jet, ref) < 0.9) {
separated = false;
break;
}
}
if (!separated) continue;
jets.push_back(jet);
sumEt += Et;
jetsystem += jet.momentum();
if (jets.size() >= 6) break;
}
if (jets.size() < 6) vetoEvent;
if (sumEt < 320.0*GeV) vetoEvent;
double m6J = _safeMass(jetsystem);
if (m6J < 520.0*GeV) vetoEvent;
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jetsystem.betaVec());
vector<FourMomentum> jets6;
foreach (Jet jet, jets) {
jets6.push_back(cms_boost.transform(jet.momentum()));
}
std::sort(jets6.begin(), jets6.end(), FourMomentum::byEDescending());
FourMomentum pE, pF;
vector<FourMomentum> jets5(_reduce(jets6, pE, pF));
std::sort(jets5.begin(), jets5.end(), FourMomentum::byEDescending());
FourMomentum pCp, pDp;
vector<FourMomentum> jets4(_reduce(jets5, pCp, pDp));
std::sort(jets4.begin(), jets4.end(), FourMomentum::byEDescending());
FourMomentum pApp, pBpp;
vector<FourMomentum> jets3(_reduce(jets4, pApp, pBpp));
std::sort(jets3.begin(), jets3.end(), FourMomentum::byEDescending());
FourMomentum p3ppp(jets3[0]);
FourMomentum p4ppp(jets3[1]);
FourMomentum p5ppp(jets3[2]);
double X3ppp = 2.0*p3ppp.E()/m6J;
if (X3ppp > 0.9) vetoEvent;
FourMomentum pAV = cms_boost.transform(_avg_beam_in_lab(m6J, jetsystem.rapidity()));
double costheta3ppp = pAV.p3().unit().dot(p3ppp.p3().unit());
if (fabs(costheta3ppp) > 0.9) vetoEvent;
- const double weight = 1.0;
-
// 3-jet-system variables
- _h_m6J->fill(m6J, weight);
- _h_X3ppp->fill(X3ppp, weight);
- _h_X4ppp->fill(2.0*p4ppp.E()/m6J, weight);
- _h_costheta3ppp->fill(costheta3ppp, weight);
+ _h_m6J->fill(m6J);
+ _h_X3ppp->fill(X3ppp);
+ _h_X4ppp->fill(2.0*p4ppp.E()/m6J);
+ _h_costheta3ppp->fill(costheta3ppp);
double psi3ppp = _psi(p3ppp, pAV, p4ppp, p5ppp);
- _h_psi3ppp->fill(psi3ppp, weight);
- _h_f3ppp->fill(_safeMass(p3ppp)/m6J, weight);
- _h_f4ppp->fill(_safeMass(p4ppp)/m6J, weight);
- _h_f5ppp->fill(_safeMass(p5ppp)/m6J, weight);
+ _h_psi3ppp->fill(psi3ppp);
+ _h_f3ppp->fill(_safeMass(p3ppp)/m6J);
+ _h_f4ppp->fill(_safeMass(p4ppp)/m6J);
+ _h_f5ppp->fill(_safeMass(p5ppp)/m6J);
// 4 -> 3 jet variables
- _h_fApp->fill(_safeMass(pApp)/m6J, weight);
- _h_fBpp->fill(_safeMass(pApp)/m6J, weight);
- _h_XApp->fill(pApp.E()/(pApp.E()+pBpp.E()), weight);
+ _h_fApp->fill(_safeMass(pApp)/m6J);
+ _h_fBpp->fill(_safeMass(pBpp)/m6J);
+ _h_XApp->fill(pApp.E()/(pApp.E()+pBpp.E()));
double psiAppBpp = _psi(pApp, pBpp, pApp+pBpp, pAV);
- _h_psiAppBpp->fill(psiAppBpp, weight);
+ _h_psiAppBpp->fill(psiAppBpp);
// 5 -> 4 jet variables
- _h_fCp->fill(_safeMass(pCp)/m6J, weight);
- _h_fDp->fill(_safeMass(pDp)/m6J, weight);
- _h_XCp->fill(pCp.E()/(pCp.E()+pDp.E()), weight);
+ _h_fCp->fill(_safeMass(pCp)/m6J);
+ _h_fDp->fill(_safeMass(pDp)/m6J);
+ _h_XCp->fill(pCp.E()/(pCp.E()+pDp.E()));
double psiCpDp = _psi(pCp, pDp, pCp+pDp, pAV);
- _h_psiCpDp->fill(psiCpDp, weight);
+ _h_psiCpDp->fill(psiCpDp);
// 6 -> 5 jet variables
- _h_fE->fill(_safeMass(pE)/m6J, weight);
- _h_fF->fill(_safeMass(pF)/m6J, weight);
- _h_XE->fill(pE.E()/(pE.E()+pF.E()), weight);
+ _h_fE->fill(_safeMass(pE)/m6J);
+ _h_fF->fill(_safeMass(pF)/m6J);
+ _h_XE->fill(pE.E()/(pE.E()+pF.E()));
double psiEF = _psi(pE, pF, pE+pF, pAV);
- _h_psiEF->fill(psiEF, weight);
+ _h_psiEF->fill(psiEF);
}
void finalize() {
normalize(_h_m6J);
normalize(_h_X3ppp);
normalize(_h_X4ppp);
normalize(_h_costheta3ppp);
normalize(_h_psi3ppp);
normalize(_h_f3ppp);
normalize(_h_f4ppp);
normalize(_h_f5ppp);
normalize(_h_XApp);
normalize(_h_XCp);
normalize(_h_XE);
normalize(_h_psiAppBpp);
normalize(_h_psiCpDp);
normalize(_h_psiEF);
normalize(_h_fApp);
normalize(_h_fBpp);
normalize(_h_fCp);
normalize(_h_fDp);
normalize(_h_fE);
normalize(_h_fF);
}
private:
vector<FourMomentum> _reduce(const vector<FourMomentum>& jets,
FourMomentum& combined1,
FourMomentum& combined2) {
double minMass2 = 1e9;
size_t idx1(jets.size()), idx2(jets.size());
for (size_t i = 0; i < jets.size(); ++i) {
for (size_t j = i+1; j < jets.size(); ++j) {
double mass2 = FourMomentum(jets[i] + jets[j]).mass2();
if (mass2 < minMass2) {
idx1 = i;
idx2 = j;
}
}
}
vector<FourMomentum> newjets;
for (size_t i = 0; i < jets.size(); ++i) {
if (i != idx1 && i != idx2) newjets.push_back(jets[i]);
}
newjets.push_back(jets[idx1] + jets[idx2]);
combined1 = jets[idx1];
combined2 = jets[idx2];
return newjets;
}
FourMomentum _avg_beam_in_lab(const double& m, const double& y) {
const double mt = m/2.0;
FourMomentum beam1(mt, 0, 0, mt);
FourMomentum beam2(mt, 0, 0, -mt);
if (fabs(y) > 1e-3) {
FourMomentum boostvec(cosh(y), 0.0, 0.0, sinh(y));
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(boostvec.betaVec()).inverse();
beam1 = cms_boost.transform(beam1);
beam2 = cms_boost.transform(beam2);
}
return (beam1.E() > beam2.E()) ? beam1 - beam2 : beam2 - beam1;
}
double _psi(const FourMomentum& p1, const FourMomentum& p2,
const FourMomentum& p3, const FourMomentum& p4) {
Vector3 p1xp2 = p1.p3().cross(p2.p3());
Vector3 p3xp4 = p3.p3().cross(p4.p3());
return mapAngle0ToPi(acos(p1xp2.unit().dot(p3xp4.unit())));
}
double _safeMass(const FourMomentum& p) {
double mass2 = p.mass2();
if (mass2 > 0.0) return sqrt(mass2);
if (mass2 < -1e-5) MSG_WARNING("m2 = " << mass2 << ". Assuming m2=0.");
return 0.0;
}
private:
Histo1DPtr _h_m6J;
Histo1DPtr _h_X3ppp, _h_X4ppp;
Histo1DPtr _h_costheta3ppp;
Histo1DPtr _h_psi3ppp;
Histo1DPtr _h_f3ppp, _h_f4ppp, _h_f5ppp;
Histo1DPtr _h_XApp, _h_XCp, _h_XE;
Histo1DPtr _h_psiAppBpp, _h_psiCpDp, _h_psiEF;
Histo1DPtr _h_fApp, _h_fBpp, _h_fCp, _h_fDp, _h_fE, _h_fF;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1997_S3541940);
}
diff --git a/analyses/pluginCDF/CDF_1998_S3618439.cc b/analyses/pluginCDF/CDF_1998_S3618439.cc
--- a/analyses/pluginCDF/CDF_1998_S3618439.cc
+++ b/analyses/pluginCDF/CDF_1998_S3618439.cc
@@ -1,78 +1,76 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF diff cross-section in events with large missing energy
class CDF_1998_S3618439 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_1998_S3618439()
: Analysis("CDF_1998_S3618439")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs(-4.2, 4.2);
declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets");
book(_h_sumET_20 ,1, 1, 1);
book(_h_sumET_100 ,1, 1, 2);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
Jets jets = apply<FastJets>(event, "Jets").jets(Cuts::Et > 20*GeV, cmpMomByEt);
double sumET_20(0.0), sumET_100(0.0);
foreach (const Jet& jet, jets) {
double ET = jet.Et()/GeV;
sumET_20 += ET;
if (ET > 100.0) sumET_100 += ET;
}
- if (sumET_20 > 320.0) _h_sumET_20->fill(sumET_20, weight);
- if (sumET_100 > 320.0) _h_sumET_100->fill(sumET_100, weight);
+ if (sumET_20 > 320.0) _h_sumET_20->fill(sumET_20);
+ if (sumET_100 > 320.0) _h_sumET_100->fill(sumET_100);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_sumET_20, crossSection()/picobarn/sumOfWeights());
scale(_h_sumET_100, crossSection()/picobarn/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_sumET_20, _h_sumET_100;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1998_S3618439);
}
diff --git a/analyses/pluginCDF/CDF_2000_S4266730.cc b/analyses/pluginCDF/CDF_2000_S4266730.cc
--- a/analyses/pluginCDF/CDF_2000_S4266730.cc
+++ b/analyses/pluginCDF/CDF_2000_S4266730.cc
@@ -1,74 +1,74 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF dijet cross-section, differential in dijet mass
class CDF_2000_S4266730 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_2000_S4266730()
: Analysis("CDF_2000_S4266730")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs(-4.2, 4.2);
declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets");
book(_h_mjj ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
Jets jets = apply<FastJets>(event, "Jets").jets(cmpMomByEt);
if (jets.size() < 2) vetoEvent;
FourMomentum jet1 = jets[0].momentum();
FourMomentum jet2 = jets[1].momentum();
double eta1 = jet1.eta();
double eta2 = jet2.eta();
if (fabs(eta1) > 2.0 || fabs(eta2) > 2.0) vetoEvent;
if (fabs(tanh((eta1-eta2)/2)) > 2.0/3.0) vetoEvent;
double mjj = FourMomentum(jet1+jet2).mass()/GeV;
if (mjj < 180) vetoEvent;
- _h_mjj->fill(mjj, 1.0);
+ _h_mjj->fill(mjj);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_mjj, crossSection()/picobarn/sumOfWeights());
}
//@}
private:
/// Histogram
Histo1DPtr _h_mjj;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2000_S4266730);
}
diff --git a/analyses/pluginCDF/CDF_2001_S4563131.cc b/analyses/pluginCDF/CDF_2001_S4563131.cc
--- a/analyses/pluginCDF/CDF_2001_S4563131.cc
+++ b/analyses/pluginCDF/CDF_2001_S4563131.cc
@@ -1,71 +1,70 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF Run I inclusive jet cross-section
class CDF_2001_S4563131 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_2001_S4563131()
: Analysis("CDF_2001_S4563131")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs(-4.2, 4.2);
declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets");
book(_h_ET ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
Jets jets = apply<FastJets>(event, "Jets").jets(Cuts::Et > 40*GeV && Cuts::abseta >= 0.1 && Cuts::abseta <= 0.7, cmpMomByEt);
foreach (const Jet& jet, jets) {
//if (inRange(jet.abseta(), 0.1, 0.7))
- _h_ET->fill(jet.Et(), weight);
+ _h_ET->fill(jet.Et());
}
}
/// Normalise histograms etc., after the run
void finalize() {
const double deta = 1.2;
scale(_h_ET, crossSection()/sumOfWeights()/deta/nanobarn);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_ET;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2001_S4563131);
}
diff --git a/analyses/pluginCDF/CDF_2001_S4751469.cc b/analyses/pluginCDF/CDF_2001_S4751469.cc
--- a/analyses/pluginCDF/CDF_2001_S4751469.cc
+++ b/analyses/pluginCDF/CDF_2001_S4751469.cc
@@ -1,260 +1,264 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
#include "Rivet/Projections/ConstLossyFinalState.hh"
//#include "Rivet/Projections/SmearedParticles.hh"
namespace Rivet {
/// @brief Field-Stuart CDF Run I track-jet underlying event analysis
///
/// @author Andy Buckley
///
/// The "original" underlying event analysis, using a non-standard track-jet algorithm.
///
/// @par Run conditions
///
/// @arg \f$ \sqrt{s} = \f$ 1800 GeV
/// @arg Run with generic QCD events.
/// @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the profile histograms:
/// @arg \f$ p_\perp^\text{min} = \f$ 0 (min bias), 10, 20 GeV
class CDF_2001_S4751469 : public Analysis {
public:
/// Constructor: cuts on final state are \f$ -1 < \eta < 1 \f$
/// and \f$ p_T > 0.5 \f$ GeV.
CDF_2001_S4751469()
- : Analysis("CDF_2001_S4751469"),
- _totalNumTrans2(0), _totalNumTrans5(0), _totalNumTrans30(0),
- _sumWeightsPtLead2(0),_sumWeightsPtLead5(0), _sumWeightsPtLead30(0)
+ : Analysis("CDF_2001_S4751469")
{ }
/// @name Analysis methods
//@{
// Book histograms
void init() {
declare(TriggerCDFRun0Run1(), "Trigger");
// Randomly discard 8% of charged particles as a kind of hacky detector correction.
const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV);
/// @todo Replace ConstLossyFinalState with SmearedParticles
const ConstLossyFinalState lossyfs(cfs, 0.08);
//const SmearedParticles lossyfs(cfs, [](const Particle&){ return 0.92; });
declare(lossyfs, "FS");
declare(FastJets(lossyfs, FastJets::TRACKJET, 0.7), "TrackJet");
book(_numvsDeltaPhi2 ,1, 1, 1);
book(_numvsDeltaPhi5 ,1, 1, 2);
book(_numvsDeltaPhi30 ,1, 1, 3);
book(_pTvsDeltaPhi2 ,2, 1, 1);
book(_pTvsDeltaPhi5 ,2, 1, 2);
book(_pTvsDeltaPhi30 ,2, 1, 3);
book(_numTowardMB ,3, 1, 1);
book(_numTransMB ,3, 1, 2);
book(_numAwayMB ,3, 1, 3);
book(_numTowardJ20 ,4, 1, 1);
book(_numTransJ20 ,4, 1, 2);
book(_numAwayJ20 ,4, 1, 3);
book(_ptsumTowardMB ,5, 1, 1);
book(_ptsumTransMB ,5, 1, 2);
book(_ptsumAwayMB ,5, 1, 3);
book(_ptsumTowardJ20 ,6, 1, 1);
book(_ptsumTransJ20 ,6, 1, 2);
book(_ptsumAwayJ20 ,6, 1, 3);
book(_ptTrans2 ,7, 1, 1);
book(_ptTrans5 ,7, 1, 2);
book(_ptTrans30 ,7, 1, 3);
+
+ book(_totalNumTrans2, "totalNumTrans2");
+ book(_totalNumTrans5, "totalNumTrans5");
+ book(_totalNumTrans30, "totalNumTrans30");
+ book(_sumWeightsPtLead2, "sumWeightsPtLead2");
+ book(_sumWeightsPtLead5, "sumWeightsPtLead5");
+ book(_sumWeightsPtLead30, "sumWeightsPtLead30");
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
// Get jets, sorted by pT
const Jets jets = apply<JetAlg>(event, "TrackJet").jetsByPt();
if (jets.empty()) vetoEvent;
const Jet jet1 = jets.front();
const double ptLead = jet1.pT();
// Cut on highest pT jet: combined 0.5 GeV < pT(lead) < 50 GeV
if (ptLead/GeV < 0.5) vetoEvent;
if (ptLead/GeV > 50.0) vetoEvent;
// Count sum of all event weights in three pT_lead regions
- const double weight = 1.0;
- if (ptLead/GeV > 2.0) _sumWeightsPtLead2 += weight;
- if (ptLead/GeV > 5.0) _sumWeightsPtLead5 += weight;
- if (ptLead/GeV > 30.0) _sumWeightsPtLead30 += weight;
+ if (ptLead/GeV > 2.0) _sumWeightsPtLead2->fill();
+ if (ptLead/GeV > 5.0) _sumWeightsPtLead5->fill();
+ if (ptLead/GeV > 30.0) _sumWeightsPtLead30->fill();
// Run over tracks
double ptSumToward(0.0), ptSumAway(0.0), ptSumTrans(0.0);
size_t numToward(0), numTrans(0), numAway(0);
// Temporary histos that bin N and pT in dphi
Profile1D htmp_num_dphi_2(refData(1, 1, 1)), htmp_num_dphi_5(refData(1, 1, 2)), htmp_num_dphi_30(refData(1, 1, 3));
Profile1D htmp_pt_dphi_2(refData(2, 1, 1)), htmp_pt_dphi_5(refData(2, 1, 2)), htmp_pt_dphi_30(refData(2, 1, 3));
// Final state charged particles
/// @todo Non-trackjet track efficiencies are corrected?
const Particles& tracks = apply<FinalState>(event, "FS").particles();
for (const Particle& p : tracks) {
const double dPhi = deltaPhi(p, jet1);
const double pT = p.pT();
if (dPhi < PI/3.0) {
ptSumToward += pT;
++numToward;
}
else if (dPhi < 2*PI/3.0) {
ptSumTrans += pT;
++numTrans;
// Fill transverse pT distributions
if (ptLead/GeV > 2.0) {
- _ptTrans2->fill(pT/GeV, weight);
- _totalNumTrans2 += weight;
+ _ptTrans2->fill(pT/GeV);
+ _totalNumTrans2->fill();
}
if (ptLead/GeV > 5.0) {
- _ptTrans5->fill(pT/GeV, weight);
- _totalNumTrans5 += weight;
+ _ptTrans5->fill(pT/GeV);
+ _totalNumTrans5->fill();
}
if (ptLead/GeV > 30.0) {
- _ptTrans30->fill(pT/GeV, weight);
- _totalNumTrans30 += weight;
+ _ptTrans30->fill(pT/GeV);
+ _totalNumTrans30->fill();
}
}
else {
ptSumAway += pT;
++numAway;
}
// Fill tmp histos to bin event's track Nch & pT in dphi
const double dPhideg = 180*dPhi/M_PI;
if (ptLead/GeV > 2.0) {
htmp_num_dphi_2.fill(dPhideg, 1);
htmp_pt_dphi_2.fill (dPhideg, pT/GeV);
}
if (ptLead/GeV > 5.0) {
htmp_num_dphi_5.fill(dPhideg, 1);
htmp_pt_dphi_5.fill (dPhideg, pT/GeV);
}
if (ptLead/GeV > 30.0) {
htmp_num_dphi_30.fill(dPhideg, 1);
htmp_pt_dphi_30.fill (dPhideg, pT/GeV);
}
}
// Update the "proper" dphi profile histograms
for (int i = 0; i < 50; i++) { ///< @todo Should really explicitly iterate over nbins for each temp histo
if (ptLead/GeV > 2.0) {
const double x2 = htmp_pt_dphi_2.bin(i).xMid();
const double num2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_num_dphi_2.bin(i).mean() : 0.0;
const double pt2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_pt_dphi_2.bin(i).mean() : 0.0;
- _numvsDeltaPhi2->fill(x2, num2, weight);
- _pTvsDeltaPhi2->fill(x2, pt2, weight);
+ _numvsDeltaPhi2->fill(x2, num2);
+ _pTvsDeltaPhi2->fill(x2, pt2);
}
if (ptLead/GeV > 5.0) {
const double x5 = htmp_pt_dphi_5.bin(i).xMid();
const double num5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_num_dphi_5.bin(i).mean() : 0.0;
const double pt5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_pt_dphi_5.bin(i).mean() : 0.0;
- _numvsDeltaPhi5->fill(x5, num5, weight);
- _pTvsDeltaPhi5->fill(x5, pt5, weight);
+ _numvsDeltaPhi5->fill(x5, num5);
+ _pTvsDeltaPhi5->fill(x5, pt5);
}
if (ptLead/GeV > 30.0) {
const double x30 = htmp_pt_dphi_30.bin(i).xMid();
const double num30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? htmp_num_dphi_30.bin(i).mean() : 0.0;
const double pt30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? htmp_pt_dphi_30.bin(i).mean() : 0.0;
- _numvsDeltaPhi30->fill(x30, num30, weight);
- _pTvsDeltaPhi30->fill(x30, pt30, weight);
+ _numvsDeltaPhi30->fill(x30, num30);
+ _pTvsDeltaPhi30->fill(x30, pt30);
}
}
// Log some event details about pT
MSG_DEBUG("pT [lead; twd, away, trans] = [" << ptLead << "; "
<< ptSumToward << ", " << ptSumAway << ", " << ptSumTrans << "]");
// Update the pT profile histograms
- _ptsumTowardMB->fill(ptLead/GeV, ptSumToward/GeV, weight);
- _ptsumTowardJ20->fill(ptLead/GeV, ptSumToward/GeV, weight);
+ _ptsumTowardMB->fill(ptLead/GeV, ptSumToward/GeV);
+ _ptsumTowardJ20->fill(ptLead/GeV, ptSumToward/GeV);
- _ptsumTransMB->fill(ptLead/GeV, ptSumTrans/GeV, weight);
- _ptsumTransJ20->fill(ptLead/GeV, ptSumTrans/GeV, weight);
+ _ptsumTransMB->fill(ptLead/GeV, ptSumTrans/GeV);
+ _ptsumTransJ20->fill(ptLead/GeV, ptSumTrans/GeV);
- _ptsumAwayMB->fill(ptLead/GeV, ptSumAway/GeV, weight);
- _ptsumAwayJ20->fill(ptLead/GeV, ptSumAway/GeV, weight);
+ _ptsumAwayMB->fill(ptLead/GeV, ptSumAway/GeV);
+ _ptsumAwayJ20->fill(ptLead/GeV, ptSumAway/GeV);
// Log some event details about Nch
MSG_DEBUG("N [twd, away, trans] = [" << ptLead << "; "
<< numToward << ", " << numTrans << ", " << numAway << "]");
// Update the N_track profile histograms
- _numTowardMB->fill(ptLead/GeV, numToward, weight);
- _numTowardJ20->fill(ptLead/GeV, numToward, weight);
+ _numTowardMB->fill(ptLead/GeV, numToward);
+ _numTowardJ20->fill(ptLead/GeV, numToward);
- _numTransMB->fill(ptLead/GeV, numTrans, weight);
- _numTransJ20->fill(ptLead/GeV, numTrans, weight);
+ _numTransMB->fill(ptLead/GeV, numTrans);
+ _numTransJ20->fill(ptLead/GeV, numTrans);
- _numAwayMB->fill(ptLead/GeV, numAway, weight);
- _numAwayJ20->fill(ptLead/GeV, numAway, weight);
+ _numAwayMB->fill(ptLead/GeV, numAway);
+ _numAwayJ20->fill(ptLead/GeV, numAway);
}
/// Normalize histos
void finalize() {
normalize(_ptTrans2, _totalNumTrans2 / _sumWeightsPtLead2);
normalize(_ptTrans5, _totalNumTrans5 / _sumWeightsPtLead5);
normalize(_ptTrans30, _totalNumTrans30 / _sumWeightsPtLead30);
}
//@}
private:
/// Sum total number of charged particles in the trans region, in 3 \f$ p_\perp^\text{lead} \f$ bins.
- double _totalNumTrans2, _totalNumTrans5, _totalNumTrans30;
+ CounterPtr _totalNumTrans2, _totalNumTrans5, _totalNumTrans30;
/// Sum the total number of events in 3 \f$ p_\perp^\text{lead} \f$ bins.
- double _sumWeightsPtLead2,_sumWeightsPtLead5, _sumWeightsPtLead30;
+ CounterPtr _sumWeightsPtLead2,_sumWeightsPtLead5, _sumWeightsPtLead30;
/// @name Histogram collections
//@{
// The sumpt vs. dphi and Nch vs. dphi histos
Profile1DPtr _numvsDeltaPhi2, _numvsDeltaPhi5, _numvsDeltaPhi30;
Profile1DPtr _pTvsDeltaPhi2, _pTvsDeltaPhi5, _pTvsDeltaPhi30;
/// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum in the toward, transverse and away regions.
Profile1DPtr _ptsumTowardMB, _ptsumTransMB, _ptsumAwayMB;
Profile1DPtr _ptsumTowardJ20, _ptsumTransJ20, _ptsumAwayJ20;
/// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for
/// the number of charged particles per jet in the toward, transverse and
/// away regions.
Profile1DPtr _numTowardMB, _numTransMB, _numAwayMB;
Profile1DPtr _numTowardJ20, _numTransJ20, _numAwayJ20;
/// Histogram of \f$ p_T \f$ distribution for 3 different \f$ p_{T1} \f$ IR cutoffs.
Histo1DPtr _ptTrans2, _ptTrans5, _ptTrans30;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2001_S4751469);
}
diff --git a/analyses/pluginCDF/CDF_2002_S4796047.cc b/analyses/pluginCDF/CDF_2002_S4796047.cc
--- a/analyses/pluginCDF/CDF_2002_S4796047.cc
+++ b/analyses/pluginCDF/CDF_2002_S4796047.cc
@@ -1,122 +1,122 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
namespace Rivet {
/// @brief CDF Run I charged multiplicity measurement
/// @author Hendrik Hoeth
///
/// This analysis measures the charged multiplicity distribution
/// in minimum bias events at two different center-of-mass energies:
/// \f$ \sqrt{s} = \f$ 630 and 1800 GeV.
///
/// Particles with c*tau > 10 mm are considered stable, i.e. they
/// are reconstructed and their decay products removed. Selection
/// cuts are |eta|<1 and pT>0.4 GeV.
///
/// @par Run conditions
///
/// @arg Two different beam energies: \f$ \sqrt{s} = \$f 630 & 1800 GeV
/// @arg Run with generic QCD events.
/// @arg Set particles with c*tau > 10 mm stable
class CDF_2002_S4796047 : public Analysis {
public:
/// Constructor
CDF_2002_S4796047()
: Analysis("CDF_2002_S4796047")
{
- _sumWTrig = 0;
}
/// @name Analysis methods
//@{
/// Book projections and histograms
void init() {
declare(TriggerCDFRun0Run1(), "Trigger");
const ChargedFinalState cfs(-1.0, 1.0, 0.4*GeV);
declare(cfs, "FS");
// Histos
if (fuzzyEquals(sqrtS()/GeV, 630)) {
book(_hist_multiplicity ,1, 1, 1);
book(_hist_pt_vs_multiplicity ,3, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 1800)) {
book(_hist_multiplicity ,2, 1, 1);
book(_hist_pt_vs_multiplicity ,4, 1, 1);
}
+ book(_sumWTrig, "sumWTrig");
+
}
/// Do the analysis
void analyze(const Event& evt) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(evt, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
// Get beam energy and tracks
const ChargedFinalState& fs = apply<ChargedFinalState>(evt, "FS");
const size_t numParticles = fs.particles().size();
// Fill histos of charged multiplicity distributions
- _hist_multiplicity->fill(numParticles, weight);
+ _hist_multiplicity->fill(numParticles);
// Fill histos for <pT> vs. charged multiplicity
foreach (const Particle& p, fs.particles()) {
const double pT = p.pT();
- _hist_pt_vs_multiplicity->fill(numParticles, pT/GeV, weight);
+ _hist_pt_vs_multiplicity->fill(numParticles, pT/GeV);
}
}
void finalize() {
// This normalisation is NOT a cross-section.
// In the paper the x-axes (!) of the histograms are
// scaled such that they can put both energies in the
// same plot. Of course this affects the area, too.
// Since we want to plot the actual multiplicity, we
// scale the x-axes back and have to adjust the areas
// accordingly. The scale factors are given in the
// legend of the plot in the paper. Have a look at
// figure 1 and everything immediately becomes clear.
// DON'T TRY TO REPAIR THIS, YOU WILL BREAK IT.
if (fuzzyEquals(sqrtS()/GeV, 630)) {
normalize(_hist_multiplicity, 3.21167); // fixed norm OK
} else if (fuzzyEquals(sqrtS()/GeV, 1800)) {
normalize(_hist_multiplicity, 4.19121); // fixed norm OK
}
}
//@}
private:
/// @name Counter
//@{
- double _sumWTrig;
+ CounterPtr _sumWTrig;
//@}
/// @name Histos
//@{
Histo1DPtr _hist_multiplicity;
Profile1DPtr _hist_pt_vs_multiplicity;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2002_S4796047);
}
diff --git a/analyses/pluginCDF/CDF_2004_S5839831.cc b/analyses/pluginCDF/CDF_2004_S5839831.cc
--- a/analyses/pluginCDF/CDF_2004_S5839831.cc
+++ b/analyses/pluginCDF/CDF_2004_S5839831.cc
@@ -1,386 +1,385 @@
// -*- C++ -*-
// "Acosta" underlying event analysis at CDF, inc. "Swiss Cheese"
#include "Rivet/Analysis.hh"
#include "Rivet/Jet.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
namespace Rivet {
/// @brief CDF calo jet underlying event analysis at 630 and 1800 GeV
///
/// CDF measurement of underlying event using calorimeter jet scales and
/// alignment, particle flow activity in transverse cones, and the Swiss
/// Cheese analysis method, where cones are excluded around the 2 and 3
/// hardest jets.
///
/// @author Andy Buckley
class CDF_2004_S5839831 : public Analysis {
public:
/// Constructor: cuts on charged final state are \f$ -1 < \eta < 1 \f$
/// and \f$ p_T > 0.4 \f$ GeV.
CDF_2004_S5839831()
: Analysis("CDF_2004_S5839831")
{ }
private:
/// @cond CONEUE_DETAIL
struct ConesInfo {
ConesInfo() : numMax(0), numMin(0), ptMax(0), ptMin(0), ptDiff(0) {}
unsigned int numMax, numMin;
double ptMax, ptMin, ptDiff;
};
/// @endcond
ConesInfo _calcTransCones(const double etaLead, const double phiLead,
const Particles& tracks) {
const double phiTransPlus = mapAngle0To2Pi(phiLead + PI/2.0);
const double phiTransMinus = mapAngle0To2Pi(phiLead - PI/2.0);
MSG_DEBUG("phi_lead = " << phiLead
<< " -> trans = (" << phiTransPlus
<< ", " << phiTransMinus << ")");
unsigned int numPlus(0), numMinus(0);
double ptPlus(0), ptMinus(0);
// Run over all charged tracks
foreach (const Particle& t, tracks) {
FourMomentum trackMom = t.momentum();
const double pt = trackMom.pT();
// Find if track mom is in either transverse cone
if (deltaR(trackMom, etaLead, phiTransPlus) < 0.7) {
ptPlus += pt;
numPlus += 1;
} else if (deltaR(trackMom, etaLead, phiTransMinus) < 0.7) {
ptMinus += pt;
numMinus += 1;
}
}
ConesInfo rtn;
// Assign N_{min,max} from N_{plus,minus}
rtn.numMax = (ptPlus >= ptMinus) ? numPlus : numMinus;
rtn.numMin = (ptPlus >= ptMinus) ? numMinus : numPlus;
// Assign pT_{min,max} from pT_{plus,minus}
rtn.ptMax = (ptPlus >= ptMinus) ? ptPlus : ptMinus;
rtn.ptMin = (ptPlus >= ptMinus) ? ptMinus : ptPlus;
rtn.ptDiff = fabs(rtn.ptMax - rtn.ptMin);
MSG_DEBUG("Min cone has " << rtn.numMin << " tracks -> "
<< "pT_min = " << rtn.ptMin/GeV << " GeV");
MSG_DEBUG("Max cone has " << rtn.numMax << " tracks -> "
<< "pT_max = " << rtn.ptMax/GeV << " GeV");
return rtn;
}
ConesInfo _calcTransCones(const FourMomentum& leadvec,
const Particles& tracks) {
const double etaLead = leadvec.eta();
const double phiLead = leadvec.phi();
return _calcTransCones(etaLead, phiLead, tracks);
}
/// @name Analysis methods
//@{
void init() {
// Set up projections
declare(TriggerCDFRun0Run1(), "Trigger");
declare(Beam(), "Beam");
const FinalState calofs(-1.2, 1.2);
declare(calofs, "CaloFS");
declare(FastJets(calofs, FastJets::CDFJETCLU, 0.7), "Jets");
const ChargedFinalState trackfs(-1.2, 1.2, 0.4*GeV);
declare(trackfs, "TrackFS");
// Restrict tracks to |eta| < 0.7 for the min bias part.
const ChargedFinalState mbfs(-0.7, 0.7, 0.4*GeV);
declare(mbfs, "MBFS");
// Restrict tracks to |eta| < 1 for the Swiss-Cheese part.
const ChargedFinalState cheesefs(-1.0, 1.0, 0.4*GeV);
declare(cheesefs, "CheeseFS");
declare(FastJets(cheesefs, FastJets::CDFJETCLU, 0.7), "CheeseJets");
// Book histograms
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
book(_pt90MaxAvg1800 ,1, 1, 1);
book(_pt90MinAvg1800 ,1, 1, 2);
book(_pt90Max1800 ,2, 1, 1);
book(_pt90Min1800 ,2, 1, 2);
book(_pt90Diff1800 ,2, 1, 3);
book(_num90Max1800 ,4, 1, 1);
book(_num90Min1800 ,4, 1, 2);
book(_pTSum1800_2Jet ,7, 1, 1);
book(_pTSum1800_3Jet ,7, 1, 2);
book(_pt90Dbn1800Et40 ,3, 1, 1);
book(_pt90Dbn1800Et80 ,3, 1, 2);
book(_pt90Dbn1800Et120 ,3, 1, 3);
book(_pt90Dbn1800Et160 ,3, 1, 4);
book(_pt90Dbn1800Et200 ,3, 1, 5);
book(_numTracksDbn1800MB ,5, 1, 1);
book(_ptDbn1800MB ,6, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) {
book(_pt90Max630 ,8, 1, 1);
book(_pt90Min630 ,8, 1, 2);
book(_pt90Diff630 ,8, 1, 3);
book(_pTSum630_2Jet ,9, 1, 1);
book(_pTSum630_3Jet ,9, 1, 2);
book(_numTracksDbn630MB ,10, 1, 1);
book(_ptDbn630MB ,11, 1, 1);
}
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
// Get sqrt(s) and event weight
const double sqrtS = apply<Beam>(event, "Beam").sqrtS();
- const double weight = 1.0;
{
MSG_DEBUG("Running max/min analysis");
Jets jets = apply<JetAlg>(event, "Jets").jets(cmpMomByE);
if (!jets.empty()) {
// Leading jet must be in central |eta| < 0.5 region
const Jet leadingjet = jets.front();
const double etaLead = leadingjet.eta();
// Get Et of the leading jet: used to bin histograms
const double ETlead = leadingjet.Et();
MSG_DEBUG("Leading Et = " << ETlead/GeV << " GeV");
if (fabs(etaLead) > 0.5 && ETlead < 15*GeV) {
MSG_DEBUG("Leading jet eta = " << etaLead
<< " not in |eta| < 0.5 & pT > 15 GeV");
} else {
// Multiplicity & pT distributions for sqrt(s) = 630 GeV, 1800 GeV
const Particles tracks = apply<FinalState>(event, "TrackFS").particles();
const ConesInfo cones = _calcTransCones(leadingjet.momentum(), tracks);
if (fuzzyEquals(sqrtS/GeV, 630)) {
- _pt90Max630->fill(ETlead/GeV, cones.ptMax/GeV, weight);
- _pt90Min630->fill(ETlead/GeV, cones.ptMin/GeV, weight);
- _pt90Diff630->fill(ETlead/GeV, cones.ptDiff/GeV, weight);
+ _pt90Max630->fill(ETlead/GeV, cones.ptMax/GeV);
+ _pt90Min630->fill(ETlead/GeV, cones.ptMin/GeV);
+ _pt90Diff630->fill(ETlead/GeV, cones.ptDiff/GeV);
} else if (fuzzyEquals(sqrtS/GeV, 1800)) {
- _num90Max1800->fill(ETlead/GeV, cones.numMax, weight);
- _num90Min1800->fill(ETlead/GeV, cones.numMin, weight);
- _pt90Max1800->fill(ETlead/GeV, cones.ptMax/GeV, weight);
- _pt90Min1800->fill(ETlead/GeV, cones.ptMin/GeV, weight);
- _pt90Diff1800->fill(ETlead/GeV, cones.ptDiff/GeV, weight);
- _pt90MaxAvg1800->fill(ETlead/GeV, cones.ptMax/GeV, weight); // /numMax
- _pt90MinAvg1800->fill(ETlead/GeV, cones.ptMin/GeV, weight); // /numMin
+ _num90Max1800->fill(ETlead/GeV, cones.numMax);
+ _num90Min1800->fill(ETlead/GeV, cones.numMin);
+ _pt90Max1800->fill(ETlead/GeV, cones.ptMax/GeV);
+ _pt90Min1800->fill(ETlead/GeV, cones.ptMin/GeV);
+ _pt90Diff1800->fill(ETlead/GeV, cones.ptDiff/GeV);
+ _pt90MaxAvg1800->fill(ETlead/GeV, cones.ptMax/GeV); // /numMax
+ _pt90MinAvg1800->fill(ETlead/GeV, cones.ptMin/GeV); // /numMin
//
const double ptTransTotal = cones.ptMax + cones.ptMin;
if (inRange(ETlead/GeV, 40., 80.)) {
- _pt90Dbn1800Et40->fill(ptTransTotal/GeV, weight);
+ _pt90Dbn1800Et40->fill(ptTransTotal/GeV);
} else if (inRange(ETlead/GeV, 80., 120.)) {
- _pt90Dbn1800Et80->fill(ptTransTotal/GeV, weight);
+ _pt90Dbn1800Et80->fill(ptTransTotal/GeV);
} else if (inRange(ETlead/GeV, 120., 160.)) {
- _pt90Dbn1800Et120->fill(ptTransTotal/GeV, weight);
+ _pt90Dbn1800Et120->fill(ptTransTotal/GeV);
} else if (inRange(ETlead/GeV, 160., 200.)) {
- _pt90Dbn1800Et160->fill(ptTransTotal/GeV, weight);
+ _pt90Dbn1800Et160->fill(ptTransTotal/GeV);
} else if (inRange(ETlead/GeV, 200., 270.)) {
- _pt90Dbn1800Et200->fill(ptTransTotal/GeV, weight);
+ _pt90Dbn1800Et200->fill(ptTransTotal/GeV);
}
}
}
}
}
// Fill min bias total track multiplicity histos
{
MSG_DEBUG("Running min bias multiplicity analysis");
const Particles mbtracks = apply<FinalState>(event, "MBFS").particles();
if (fuzzyEquals(sqrtS/GeV, 1800)) {
- _numTracksDbn1800MB->fill(mbtracks.size(), weight);
+ _numTracksDbn1800MB->fill(mbtracks.size());
} else if (fuzzyEquals(sqrtS/GeV, 630)) {
- _numTracksDbn630MB->fill(mbtracks.size(), weight);
+ _numTracksDbn630MB->fill(mbtracks.size());
}
// Run over all charged tracks
foreach (const Particle& t, mbtracks) {
FourMomentum trackMom = t.momentum();
const double pt = trackMom.pT();
// Plot total pT distribution for min bias
if (fuzzyEquals(sqrtS/GeV, 1800)) {
- _ptDbn1800MB->fill(pt/GeV, weight);
+ _ptDbn1800MB->fill(pt/GeV);
} else if (fuzzyEquals(sqrtS/GeV, 630)) {
- _ptDbn630MB->fill(pt/GeV, weight);
+ _ptDbn630MB->fill(pt/GeV);
}
}
}
// Construct "Swiss Cheese" pT distributions, with pT contributions from
// tracks within R = 0.7 of the 1st, 2nd (and 3rd) jets being ignored. A
// different set of charged tracks, with |eta| < 1.0, is used here, and all
// the removed jets must have Et > 5 GeV.
{
MSG_DEBUG("Running Swiss Cheese analysis");
const Particles cheesetracks = apply<FinalState>(event, "CheeseFS").particles();
Jets cheesejets = apply<JetAlg>(event, "Jets").jets(cmpMomByE);
if (cheesejets.empty()) {
MSG_DEBUG("No 'cheese' jets found in event");
return;
}
if (cheesejets.size() > 1 &&
fabs(cheesejets[0].eta()) <= 0.5 &&
cheesejets[0].Et()/GeV > 5.0 &&
cheesejets[1].Et()/GeV > 5.0) {
const double cheeseETlead = cheesejets[0].Et();
const double eta1 = cheesejets[0].eta();
const double phi1 = cheesejets[0].phi();
const double eta2 = cheesejets[1].eta();
const double phi2 = cheesejets[1].phi();
double ptSumSub2(0), ptSumSub3(0);
foreach (const Particle& t, cheesetracks) {
FourMomentum trackMom = t.momentum();
const double pt = trackMom.pT();
// Subtracting 2 leading jets
const double deltaR1 = deltaR(trackMom, eta1, phi1);
const double deltaR2 = deltaR(trackMom, eta2, phi2);
MSG_TRACE("Track vs jet(1): "
<< "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - "
<< "|(" << eta1 << ", " << phi1 << ")| = " << deltaR1);
MSG_TRACE("Track vs jet(2): "
<< "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - "
<< "|(" << eta2 << ", " << phi2 << ")| = " << deltaR2);
if (deltaR1 > 0.7 && deltaR2 > 0.7) {
ptSumSub2 += pt;
// Subtracting 3rd leading jet
if (cheesejets.size() > 2 &&
cheesejets[2].Et()/GeV > 5.0) {
const double eta3 = cheesejets[2].eta();
const double phi3 = cheesejets[2].phi();
const double deltaR3 = deltaR(trackMom, eta3, phi3);
MSG_TRACE("Track vs jet(3): "
<< "|(" << trackMom.eta() << ", " << trackMom.phi() << ") - "
<< "|(" << eta3 << ", " << phi3 << ")| = " << deltaR3);
if (deltaR3 > 0.7) {
ptSumSub3 += pt;
}
}
}
}
// Swiss Cheese sub 2,3 jets distributions for sqrt(s) = 630 GeV, 1800 GeV
if (fuzzyEquals(sqrtS/GeV, 630)) {
- if (!isZero(ptSumSub2)) _pTSum630_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV, weight);
- if (!isZero(ptSumSub3))_pTSum630_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV, weight);
+ if (!isZero(ptSumSub2)) _pTSum630_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV);
+ if (!isZero(ptSumSub3))_pTSum630_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV);
} else if (fuzzyEquals(sqrtS/GeV, 1800)) {
- if (!isZero(ptSumSub2))_pTSum1800_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV, weight);
- if (!isZero(ptSumSub3))_pTSum1800_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV, weight);
+ if (!isZero(ptSumSub2))_pTSum1800_2Jet->fill(cheeseETlead/GeV, ptSumSub2/GeV);
+ if (!isZero(ptSumSub3))_pTSum1800_3Jet->fill(cheeseETlead/GeV, ptSumSub3/GeV);
}
}
}
}
void finalize() {
/// @todo Take these normalisations from the data histo (it can't come from just the MC)
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
// Normalize to actual number of entries in pT dbn histos...
normalize(_pt90Dbn1800Et40, 1656.75); // norm OK
normalize(_pt90Dbn1800Et80, 4657.5); // norm OK
normalize(_pt90Dbn1800Et120, 5395.5); // norm OK
normalize(_pt90Dbn1800Et160, 7248.75); // norm OK
normalize(_pt90Dbn1800Et200, 2442.0); // norm OK
}
// ...and for min bias distributions:
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
normalize(_numTracksDbn1800MB, 309718.25); // norm OK
normalize(_ptDbn1800MB, 33600.0); // norm OK
} else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) {
normalize(_numTracksDbn630MB, 1101024.0); // norm OK
normalize(_ptDbn630MB, 105088.0); // norm OK
}
}
//@}
private:
/// @name Histogram collections
//@{
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the average \f$ p_T \f$ in the toward, transverse and away regions at
/// \f$ \sqrt{s} = 1800 \text{GeV} \f$.
/// Corresponds to Table 1, and HepData table 1.
Profile1DPtr _pt90MaxAvg1800, _pt90MinAvg1800;
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum in the toward, transverse and away regions at
/// \f$ \sqrt{s} = 1800 \text{GeV} \f$.
/// Corresponds to figure 2/3, and HepData table 2.
Profile1DPtr _pt90Max1800, _pt90Min1800, _pt90Diff1800;
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum in the toward, transverse and away regions at
/// at \f$ \sqrt{s} = 630 \text{GeV} \f$.
/// Corresponds to figure 8, and HepData table 8.
Profile1DPtr _pt90Max630, _pt90Min630, _pt90Diff630;
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the cone track multiplicity at \f$ \sqrt{s} = 1800 \text{GeV} \f$.
/// Corresponds to figure 5, and HepData table 4.
Profile1DPtr _num90Max1800, _num90Min1800;
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum at \f$ \sqrt{s} = 1800 \text{GeV} \f$.
/// Corresponds to figure 7, and HepData table 7.
Profile1DPtr _pTSum1800_2Jet, _pTSum1800_3Jet;
/// Profile histograms, binned in the \f$ E_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum at \f$ \sqrt{s} = 630 \text{GeV} \f$.
/// Corresponds to figure 9, and HepData table 9.
Profile1DPtr _pTSum630_2Jet, _pTSum630_3Jet;
/// Histogram of \f$ p_{T\text{sum}} \f$ distribution for 5 different
/// \f$ E_{T1} \f$ bins.
/// Corresponds to figure 4, and HepData table 3.
Histo1DPtr _pt90Dbn1800Et40, _pt90Dbn1800Et80, _pt90Dbn1800Et120,
_pt90Dbn1800Et160, _pt90Dbn1800Et200;
/// Histograms of track multiplicity and \f$ p_T \f$ distributions for
/// minimum bias events.
/// Figure 6, and HepData tables 5 & 6.
/// Figure 10, and HepData tables 10 & 11.
Histo1DPtr _numTracksDbn1800MB, _ptDbn1800MB;
Histo1DPtr _numTracksDbn630MB, _ptDbn630MB;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2004_S5839831);
}
diff --git a/analyses/pluginCDF/CDF_2005_S6080774.cc b/analyses/pluginCDF/CDF_2005_S6080774.cc
--- a/analyses/pluginCDF/CDF_2005_S6080774.cc
+++ b/analyses/pluginCDF/CDF_2005_S6080774.cc
@@ -1,106 +1,104 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include <array>
namespace Rivet {
/// @brief CDF diff cross-sections for prompt di-photon production
class CDF_2005_S6080774 : public Analysis {
public:
/// Constructor
CDF_2005_S6080774()
: Analysis("CDF_2005_S6080774")
{ }
/// @name Analysis methods
//@{
void init() {
FinalState fs;
declare(fs, "FS");
IdentifiedFinalState ifs(Cuts::abseta < 0.9 && Cuts::pT > 13*GeV);
ifs.acceptId(PID::PHOTON);
declare(ifs, "IFS");
for (size_t yAxisId=0; yAxisId<4; ++yAxisId) {
book(_h_m_PP[yAxisId], 1, 1, yAxisId + 1);
book(_h_pT_PP[yAxisId], 2, 1, yAxisId + 1);
book(_h_dphi_PP[yAxisId], 3, 1, yAxisId + 1);
}
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
Particles photons = apply<IdentifiedFinalState>(event, "IFS").particlesByPt();
if (photons.size() < 2 || photons[0].pT() < 14.0*GeV) {
vetoEvent;
}
// Isolate photons with ET_sum in cone
Particles isolated_photons;
Particles fs = apply<FinalState>(event, "FS").particles();
foreach (const Particle& photon, photons) {
FourMomentum mom_in_cone;
double eta_P = photon.eta();
double phi_P = photon.phi();
foreach (const Particle& p, fs) {
if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) {
mom_in_cone += p.momentum();
}
}
if (mom_in_cone.Et()-photon.Et() < 1.0*GeV) {
isolated_photons.push_back(photon);
}
}
if (isolated_photons.size() != 2) {
vetoEvent;
}
FourMomentum mom_PP = isolated_photons[0].momentum() + isolated_photons[1].momentum();
for (size_t i=0; i<4; ++i) {
- _h_m_PP[i]->fill(mom_PP.mass(), weight);
- _h_pT_PP[i]->fill(mom_PP.pT(), weight);
+ _h_m_PP[i]->fill(mom_PP.mass());
+ _h_pT_PP[i]->fill(mom_PP.pT());
_h_dphi_PP[i]->fill(mapAngle0ToPi(isolated_photons[0].phi()-
- isolated_photons[1].phi())/M_PI, weight);
+ isolated_photons[1].phi())/M_PI);
}
}
void finalize() {
for (size_t i=0; i<4; ++i) {
scale(_h_m_PP[i], crossSection()/sumOfWeights());
scale(_h_pT_PP[i], crossSection()/sumOfWeights());
scale(_h_dphi_PP[i], crossSection()/M_PI/sumOfWeights());
}
}
//@}
private:
/// @name Histograms
//@{
std::array<Histo1DPtr,4> _h_m_PP;
std::array<Histo1DPtr,4> _h_pT_PP;
std::array<Histo1DPtr,4> _h_dphi_PP;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2005_S6080774);
}
diff --git a/analyses/pluginCDF/CDF_2006_S6450792.cc b/analyses/pluginCDF/CDF_2006_S6450792.cc
--- a/analyses/pluginCDF/CDF_2006_S6450792.cc
+++ b/analyses/pluginCDF/CDF_2006_S6450792.cc
@@ -1,61 +1,61 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF Inclusive jet cross-section differential in \f$ p_\perp \f$
class CDF_2006_S6450792 : public Analysis {
public:
/// Constructor
CDF_2006_S6450792()
: Analysis("CDF_2006_S6450792")
{ }
/// @name Analysis methods
//@{
void init() {
FinalState fs;
declare(FastJets(fs, FastJets::CDFMIDPOINT, 0.7), "ConeFinder");
book(_h_jet_pt ,1, 1, 1);
}
void analyze(const Event& event) {
const Jets& jets = apply<JetAlg>(event, "ConeFinder").jets(Cuts::pT > 61*GeV);
foreach (const Jet& jet, jets) {
if (inRange(jet.absrap(), 0.1, 0.7))
- _h_jet_pt->fill(jet.pT()/GeV, 1.0);
+ _h_jet_pt->fill(jet.pT()/GeV);
}
}
void finalize() {
const double delta_y = 1.2;
scale(_h_jet_pt, crossSection()/nanobarn/sumOfWeights()/delta_y);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_pt;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2006_S6450792);
}
diff --git a/analyses/pluginCDF/CDF_2006_S6653332.cc b/analyses/pluginCDF/CDF_2006_S6653332.cc
--- a/analyses/pluginCDF/CDF_2006_S6653332.cc
+++ b/analyses/pluginCDF/CDF_2006_S6653332.cc
@@ -1,177 +1,180 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ChargedLeptons.hh"
namespace Rivet {
/// @brief CDF Run II analysis: jet \f$ p_T \f$ and \f$ \eta \f$
/// distributions in Z + (b) jet production
/// @author Lars Sonnenschein
///
/// This CDF analysis provides \f$ p_T \f$ and \f$ \eta \f$ distributions of
/// jets in Z + (b) jet production, before and after tagging.
class CDF_2006_S6653332 : public Analysis {
public:
/// Constructor
CDF_2006_S6653332()
: Analysis("CDF_2006_S6653332"),
- _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(1.1),
- _sumWeightsWithZ(0.0), _sumWeightsWithZJet(0.0)
+ _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(1.1)
{ }
/// @name Analysis methods
//@{
void init() {
const FinalState fs(-3.6, 3.6);
declare(fs, "FS");
// Create a final state with any e+e- or mu+mu- pair with
// invariant mass 76 -> 106 GeV and ET > 20 (Z decay products)
vector<pair<PdgId,PdgId> > vids;
vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON));
vids.push_back(make_pair(PID::MUON, PID::ANTIMUON));
FinalState fs2(-3.6, 3.6);
InvMassFinalState invfs(fs2, vids, 66*GeV, 116*GeV);
declare(invfs, "INVFS");
// Make a final state without the Z decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets");
// Book histograms
book(_sigmaBJet ,1, 1, 1);
book(_ratioBJetToZ ,2, 1, 1);
book(_ratioBJetToJet ,3, 1, 1);
+
+
+ book(_sumWeightsWithZ, "sumWeightsWithZ");
+ book(_sumWeightsWithZJet, "sumWeightsWithZJet");
}
/// Do the analysis
void analyze(const Event& event) {
// Check we have an l+l- pair that passes the kinematic cuts
// Get the Z decay products (mu+mu- or e+e- pair)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& ZDecayProducts = invMassFinalState.particles();
// Make sure we have at least 2 Z decay products (mumu or ee)
if (ZDecayProducts.size() < 2) vetoEvent;
//
double Lep1Pt = ZDecayProducts[0].pT();
double Lep2Pt = ZDecayProducts[1].pT();
double Lep1Eta = ZDecayProducts[0].absrap(); ///< @todo This is y... should be abseta()?
double Lep2Eta = ZDecayProducts[1].absrap(); ///< @todo This is y... should be abseta()?
if (Lep1Eta > _LepEtaCut && Lep2Eta > _LepEtaCut) vetoEvent;
if (ZDecayProducts[0].abspid()==13 && Lep1Eta > 1. && Lep2Eta > 1.) vetoEvent;
if (Lep1Pt < _Lep1PtCut && Lep2Pt < _Lep2PtCut) vetoEvent;
- _sumWeightsWithZ += 1.0;
+ _sumWeightsWithZ->fill();
/// @todo Write out a warning if there are more than two decay products
FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum();
// Put all b-quarks in a vector
/// @todo Use jet contents rather than accessing quarks directly
Particles bquarks;
/// @todo Use nicer looping
for (GenEvent::particle_const_iterator p = event.genEvent()->particles_begin(); p != event.genEvent()->particles_end(); ++p) {
if ( std::abs((*p)->pdg_id()) == PID::BQUARK ) {
bquarks.push_back(Particle(**p));
}
}
// Get jets
const FastJets& jetpro = apply<FastJets>(event, "Jets");
MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size());
const PseudoJets& jets = jetpro.pseudoJetsByPt();
MSG_DEBUG("jetlist size = " << jets.size());
int numBJet = 0;
int numJet = 0;
// for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end
// for each event plot N jet and pT(Z), normalise to the total cross section at the end
for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) {
// select jets that pass the kinematic cuts
if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) {
++numJet;
// Does the jet contain a b-quark?
/// @todo Use jet contents rather than accessing quarks directly
bool bjet = false;
foreach (const Particle& bquark, bquarks) {
if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(), bquark.phi()) <= _Rjet) {
bjet = true;
break;
}
} // end loop around b-jets
if (bjet) {
numBJet++;
}
}
} // end loop around jets
- if (numJet > 0) _sumWeightsWithZJet += 1.0;
+ if (numJet > 0) _sumWeightsWithZJet->fill();
if (numBJet > 0) {
- _sigmaBJet->fill(1960.0,1.0);
- _ratioBJetToZ->fill(1960.0,1.0);
- _ratioBJetToJet->fill(1960.0,1.0);
+ _sigmaBJet->fill(1960.0);
+ _ratioBJetToZ->fill(1960.0);
+ _ratioBJetToJet->fill(1960.0);
}
}
/// Finalize
void finalize() {
MSG_DEBUG("Total sum of weights = " << sumOfWeights());
- MSG_DEBUG("Sum of weights for Z production in mass range = " << _sumWeightsWithZ);
- MSG_DEBUG("Sum of weights for Z+jet production in mass range = " << _sumWeightsWithZJet);
+ MSG_DEBUG("Sum of weights for Z production in mass range = " << double(_sumWeightsWithZ));
+ MSG_DEBUG("Sum of weights for Z+jet production in mass range = " << double(_sumWeightsWithZJet));
scale(_sigmaBJet, crossSection()/sumOfWeights());
scale(_ratioBJetToZ, 1.0/_sumWeightsWithZ);
scale(_ratioBJetToJet, 1.0/_sumWeightsWithZJet);
}
//@}
private:
/// @name Cuts and counters
//@{
double _Rjet;
double _JetPtCut;
double _JetEtaCut;
double _Lep1PtCut;
double _Lep2PtCut;
double _LepEtaCut;
- double _sumWeightsWithZ;
- double _sumWeightsWithZJet;
+ CounterPtr _sumWeightsWithZ;
+ CounterPtr _sumWeightsWithZJet;
//@}
/// @name Histograms
//@{
Histo1DPtr _sigmaBJet;
Histo1DPtr _ratioBJetToZ;
Histo1DPtr _ratioBJetToJet;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2006_S6653332);
}
diff --git a/analyses/pluginCDF/CDF_2008_S7540469.cc b/analyses/pluginCDF/CDF_2008_S7540469.cc
--- a/analyses/pluginCDF/CDF_2008_S7540469.cc
+++ b/analyses/pluginCDF/CDF_2008_S7540469.cc
@@ -1,177 +1,175 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Measurement differential Z/\f$ \gamma^* \f$ + jet + \f$ X \f$ cross sections
/// @author Frank Siegert
class CDF_2008_S7540469 : public Analysis {
public:
/// Constructor
CDF_2008_S7540469()
: Analysis("CDF_2008_S7540469")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
// Full final state
FinalState fs(-5.0, 5.0);
declare(fs, "FS");
// Leading electrons in tracking acceptance
IdentifiedFinalState elfs(Cuts::abseta < 5 && Cuts::pT > 25*GeV);
elfs.acceptIdPair(PID::ELECTRON);
declare(elfs, "LeadingElectrons");
book(_h_jet_multiplicity ,1, 1, 1);
book(_h_jet_pT_cross_section_incl_1jet ,2, 1, 1);
book(_h_jet_pT_cross_section_incl_2jet ,3, 1, 1);
}
/// Do the analysis
void analyze(const Event & event) {
- const double weight = 1.0;
-
// Skip if the event is empty
const FinalState& fs = apply<FinalState>(event, "FS");
if (fs.empty()) {
MSG_DEBUG("Skipping event " << numEvents() << " because no final state pair found");
vetoEvent;
}
// Find the Z candidates
const FinalState & electronfs = apply<FinalState>(event, "LeadingElectrons");
std::vector<std::pair<Particle, Particle> > Z_candidates;
Particles all_els=electronfs.particles();
for (size_t i=0; i<all_els.size(); ++i) {
for (size_t j=i+1; j<all_els.size(); ++j) {
bool candidate=true;
double mZ = FourMomentum(all_els[i].momentum()+all_els[j].momentum()).mass()/GeV;
if (mZ < 66.0 || mZ > 116.0) {
candidate = false;
}
double abs_eta_0 = fabs(all_els[i].eta());
double abs_eta_1 = fabs(all_els[j].eta());
if (abs_eta_1 < abs_eta_0) {
double tmp = abs_eta_0;
abs_eta_0 = abs_eta_1;
abs_eta_1 = tmp;
}
if (abs_eta_0 > 1.0) {
candidate = false;
}
if (!(abs_eta_1 < 1.0 || (inRange(abs_eta_1, 1.2, 2.8)))) {
candidate = false;
}
if (candidate) {
Z_candidates.push_back(make_pair(all_els[i], all_els[j]));
}
}
}
if (Z_candidates.size() != 1) {
MSG_DEBUG("Skipping event " << numEvents() << " because no unique electron pair found ");
vetoEvent;
}
// Now build the jets on a FS without the electrons from the Z (including QED radiation)
Particles jetparts;
for (const Particle& p : fs.particles()) {
bool copy = true;
if (p.pid() == PID::PHOTON) {
FourMomentum p_e0 = Z_candidates[0].first.momentum();
FourMomentum p_e1 = Z_candidates[0].second.momentum();
FourMomentum p_P = p.momentum();
if (deltaR(p_e0, p_P) < 0.2) copy = false;
if (deltaR(p_e1, p_P) < 0.2) copy = false;
} else {
if (p.genParticle()->barcode() == Z_candidates[0].first.genParticle()->barcode()) copy = false;
if (p.genParticle()->barcode() == Z_candidates[0].second.genParticle()->barcode()) copy = false;
}
if (copy) jetparts.push_back(p);
}
// Proceed to lepton dressing
const PseudoJets pjs = mkPseudoJets(jetparts);
const auto jplugin = make_shared<fastjet::CDFMidPointPlugin>(0.7, 0.5, 1.0);
const Jets jets_all = mkJets(fastjet::ClusterSequence(pjs, jplugin.get()).inclusive_jets());
const Jets jets_cut = sortByPt(filterBy(jets_all, Cuts::pT > 30*GeV && Cuts::abseta < 2.1));
// FastJets jetpro(FastJets::CDFMIDPOINT, 0.7);
// jetpro.calc(jetparts);
// // Take jets with pt > 30, |eta| < 2.1:
// const Jets& jets = jetpro.jets();
// Jets jets_cut;
// foreach (const Jet& j, jets) {
// if (j.pT()/GeV > 30.0 && j.abseta() < 2.1) {
// jets_cut.push_back(j);
// }
// }
// // Sort by pT:
// sort(jets_cut.begin(), jets_cut.end(), cmpMomByPt);
// Return if there are no jets:
MSG_DEBUG("Num jets above 30 GeV = " << jets_cut.size());
if (jets_cut.empty()) {
MSG_DEBUG("No jets pass cuts ");
vetoEvent;
}
// Cut on Delta R between Z electrons and *all* jets
for (const Jet& j : jets_cut) {
if (deltaR(Z_candidates[0].first, j) < 0.7) vetoEvent;
if (deltaR(Z_candidates[0].second, j) < 0.7) vetoEvent;
}
// Fill histograms
for (size_t njet=1; njet<=jets_cut.size(); ++njet) {
- _h_jet_multiplicity->fill(njet, weight);
+ _h_jet_multiplicity->fill(njet);
}
for (const Jet& j : jets_cut) {
if (jets_cut.size() > 0) {
- _h_jet_pT_cross_section_incl_1jet->fill(j.pT(), weight);
+ _h_jet_pT_cross_section_incl_1jet->fill(j.pT());
}
if (jets_cut.size() > 1) {
- _h_jet_pT_cross_section_incl_2jet->fill(j.pT(), weight);
+ _h_jet_pT_cross_section_incl_2jet->fill(j.pT());
}
}
}
/// Rescale histos
void finalize() {
const double invlumi = crossSection()/femtobarn/sumOfWeights();
scale(_h_jet_multiplicity, invlumi);
scale(_h_jet_pT_cross_section_incl_1jet, invlumi);
scale(_h_jet_pT_cross_section_incl_2jet, invlumi);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_multiplicity;
Histo1DPtr _h_jet_pT_cross_section_incl_1jet;
Histo1DPtr _h_jet_pT_cross_section_incl_2jet;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S7540469);
}
diff --git a/analyses/pluginCDF/CDF_2008_S7541902.cc b/analyses/pluginCDF/CDF_2008_S7541902.cc
--- a/analyses/pluginCDF/CDF_2008_S7541902.cc
+++ b/analyses/pluginCDF/CDF_2008_S7541902.cc
@@ -1,193 +1,194 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include <algorithm>
namespace Rivet {
/// @brief CDF jet pT and multiplicity distributions in W + jets events
///
/// This CDF analysis provides jet pT distributions for 4 jet multiplicity bins
/// as well as the jet multiplicity distribution in W + jets events.
/// e-Print: arXiv:0711.4044 [hep-ex]
class CDF_2008_S7541902 : public Analysis {
public:
/// Constructor
CDF_2008_S7541902()
: Analysis("CDF_2008_S7541902"),
_electronETCut(20.0*GeV), _electronETACut(1.1),
_eTmissCut(30.0*GeV), _mTCut(20.0*GeV),
- _jetEtCutA(20.0*GeV), _jetEtCutB(25.0*GeV), _jetETA(2.0),
- _sumW(0)
+ _jetEtCutA(20.0*GeV), _jetEtCutB(25.0*GeV), _jetETA(2.0)
{ }
/// @name Analysis methods
//@{
void init() {
// Set up projections
// Basic FS
FinalState fs(-3.6, 3.6);
declare(fs, "FS");
// Create a final state with any e-nu pair with invariant mass 65 -> 95 GeV and ET > 20 (W decay products)
vector<pair<PdgId,PdgId> > vids;
vids += make_pair(PID::ELECTRON, PID::NU_EBAR);
vids += make_pair(PID::POSITRON, PID::NU_E);
FinalState fs2(-3.6, 3.6, 20*GeV);
InvMassFinalState invfs(fs2, vids, 65*GeV, 95*GeV);
declare(invfs, "INVFS");
// Make a final state without the W decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFJETCLU, 0.4), "Jets");
// Book histograms
for (int i = 0 ; i < 4 ; ++i) {
book(_histJetEt[i] ,1+i, 1, 1);
book(_histJetMultRatio[i], 5, 1, i+1, true);
/// @todo These would be better off as YODA::Counter until finalize()
book(_histJetMult[i] ,6+i, 1, 1); // _sumW is essentially the 0th "histo" counter
}
+
+ book(_sumW,"sumW");
}
/// Do the analysis
void analyze(const Event& event) {
// Get the W decay products (electron and neutrino)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& wDecayProducts = invMassFinalState.particles();
FourMomentum electronP, neutrinoP;
bool gotElectron(false), gotNeutrino(false);
foreach (const Particle& p, wDecayProducts) {
FourMomentum p4 = p.momentum();
if (p4.Et() > _electronETCut && fabs(p4.eta()) < _electronETACut && p.abspid() == PID::ELECTRON) {
electronP = p4;
gotElectron = true;
}
else if (p4.Et() > _eTmissCut && p.abspid() == PID::NU_E) {
neutrinoP = p4;
gotNeutrino = true;
}
}
// Veto event if the electron or MET cuts fail
if (!gotElectron || !gotNeutrino) vetoEvent;
// Veto event if the MTR cut fails
double mT2 = 2.0 * ( electronP.pT()*neutrinoP.pT() - electronP.px()*neutrinoP.px() - electronP.py()*neutrinoP.py() );
if (sqrt(mT2) < _mTCut ) vetoEvent;
// Get the jets
const JetAlg& jetProj = apply<FastJets>(event, "Jets");
Jets theJets = jetProj.jets(cmpMomByEt, Cuts::Et > _jetEtCutA);
size_t njetsA(0), njetsB(0);
foreach (const Jet& j, theJets) {
const FourMomentum pj = j.momentum();
if (fabs(pj.rapidity()) < _jetETA) {
// Fill differential histograms for top 4 jets with Et > 20
if (njetsA < 4 && pj.Et() > _jetEtCutA) {
++njetsA;
- _histJetEt[njetsA-1]->fill(pj.Et(), 1.0);
+ _histJetEt[njetsA-1]->fill(pj.Et());
}
// Count number of jets with Et > 25 (for multiplicity histograms)
if (pj.Et() > _jetEtCutB) ++njetsB;
}
}
// Increment event counter
- _sumW += 1.0;
+ _sumW->fill();
// Jet multiplicity
for (size_t i = 1; i <= njetsB; ++i) {
/// @todo This isn't really a histogram: replace with a YODA::Counter when we have one!
- _histJetMult[i-1]->fill(1960., 1.0);
+ _histJetMult[i-1]->fill(1960.);
if (i == 4) break;
}
}
/// Finalize
void finalize() {
// Fill the 0th ratio histogram specially
/// @todo This special case for 1-to-0 will disappear if we use Counters for all mults including 0.
if (_sumW > 0) {
const YODA::Histo1D::Bin& b0 = _histJetMult[0]->bin(0);
double ratio = b0.area()/_sumW;
double frac_err = 1/_sumW; ///< This 1/sqrt{N} error treatment isn't right for weighted events: use YODA::Counter
if (b0.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b0.areaErr()/b0.area()) );
_histJetMultRatio[0]->point(0).setY(ratio, ratio*frac_err);
}
// Loop over the non-zero multiplicities
for (size_t i = 0; i < 3; ++i) {
const YODA::Histo1D::Bin& b1 = _histJetMult[i]->bin(0);
const YODA::Histo1D::Bin& b2 = _histJetMult[i+1]->bin(0);
if (b1.area() == 0.0) continue;
double ratio = b2.area()/b1.area();
double frac_err = b1.areaErr()/b1.area();
if (b2.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b2.areaErr()/b2.area()) );
_histJetMultRatio[i+1]->point(0).setY(ratio, ratio*frac_err);
}
// Normalize the non-ratio histograms
for (size_t i = 0; i < 4; ++i) {
scale(_histJetEt[i], crossSection()/picobarn/sumOfWeights());
scale(_histJetMult[i], crossSection()/picobarn/sumOfWeights());
}
}
//@}
private:
/// @name Cuts
//@{
/// Cut on the electron ET:
double _electronETCut;
/// Cut on the electron ETA:
double _electronETACut;
/// Cut on the missing ET
double _eTmissCut;
/// Cut on the transverse mass squared
double _mTCut;
/// Cut on the jet ET for differential cross sections
double _jetEtCutA;
/// Cut on the jet ET for jet multiplicity
double _jetEtCutB;
/// Cut on the jet ETA
double _jetETA;
//@}
/// @name Histograms
//@{
Histo1DPtr _histJetEt[4];
Histo1DPtr _histJetMultNorm;
Scatter2DPtr _histJetMultRatio[4];
Histo1DPtr _histJetMult[4];
- double _sumW;
+ CounterPtr _sumW;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S7541902);
}
diff --git a/analyses/pluginCDF/CDF_2008_S7782535.cc b/analyses/pluginCDF/CDF_2008_S7782535.cc
--- a/analyses/pluginCDF/CDF_2008_S7782535.cc
+++ b/analyses/pluginCDF/CDF_2008_S7782535.cc
@@ -1,141 +1,140 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/JetShape.hh"
namespace Rivet {
/// @brief CDF Run II b-jet shape paper
class CDF_2008_S7782535 : public Analysis {
public:
/// Constructor
CDF_2008_S7782535() : Analysis("CDF_2008_S7782535")
{
}
/// @name Analysis methods
//@{
void init() {
// Set up projections
const FinalState fs(-3.6, 3.6);
declare(fs, "FS");
FastJets jetproj(fs, FastJets::CDFMIDPOINT, 0.7);
jetproj.useInvisibles();
declare(jetproj, "Jets");
// Book histograms and corresponding jet shape projections
_ptedges = {{ 52, 80, 104, 142, 300 }};
for (size_t i = 0; i < 4; ++i) {
stringstream ss; ss << "JetShape" << i;
const string pname = ss.str();
_jsnames_pT[i] = pname;
const JetShape jsp(jetproj, 0.0, 0.7, 7, _ptedges[i], _ptedges[i+1], 0.0, 0.7, RAPIDITY);
declare(jsp, pname);
book(_h_Psi_pT[i] ,i+1, 2, 1);
}
book(_h_OneMinusPsi_vs_pT, 5, 1, 1);
}
// Do the analysis
void analyze(const Event& event) {
const FastJets& fjs = apply<FastJets>(event, "Jets");
const Jets& jets = fjs.jets(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) && Cuts::absrap < 0.7);
if (jets.size() == 0) {
MSG_DEBUG("No jets found in required pT range");
vetoEvent;
}
// Filter to just get a vector of b-jets
Jets bjets;
foreach (const Jet& j, jets) {
if (j.bTagged()) bjets += j;
}
if (bjets.empty()) {
MSG_DEBUG("No b-jet axes in acceptance");
vetoEvent;
}
// Bin b-jets in pT
Jets bjets_ptbinned[4];
foreach (const Jet& bj, bjets) {
const FourMomentum pbj = bj.momentum();
const int ipt = binIndex(pbj.pT(), _ptedges);
if (ipt == -1) continue; ///< Out of pT range (somehow!)
bjets_ptbinned[ipt] += bj;
}
// Loop over jet pT bins and fill shape profiles
- const double weight = 1.0;
for (size_t ipt = 0; ipt < 4; ++ipt) {
if (bjets_ptbinned[ipt].empty()) continue;
// Don't use the cached result: copy construct and calculate for provided b-jets only
JetShape jsipt = apply<JetShape>(event, _jsnames_pT[ipt]);
jsipt.calc(bjets_ptbinned[ipt]);
for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) {
for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) {
const double r_Psi = jsipt.rBinMax(rbin);
- _h_Psi_pT[ipt]->fill(r_Psi/0.7, jsipt.intJetShape(ijet, rbin), weight);
+ _h_Psi_pT[ipt]->fill(r_Psi/0.7, jsipt.intJetShape(ijet, rbin));
}
}
}
}
/// Finalize
void finalize() {
// Construct final 1-Psi(0.3/0.7) profile from Psi profiles
for (size_t i = 0; i < _ptedges.size()-1; ++i) {
// Get entry for rad_Psi = 0.2 bin
Profile1DPtr ph_i = _h_Psi_pT[i];
const double ex = 0.5*(_ptedges[i+1] - _ptedges[i]);
const double x = _ptedges[i] + ex;
double y = 0; // This is to protect against exceptions
double ey = 0; // thrown by YODA when calling mean and
if (ph_i->bin(1).effNumEntries() > 1) { // stdErr at
y = 1.0 - ph_i->bin(1).mean(); // low stats
ey= ph_i->bin(1).stdErr();
}
_h_OneMinusPsi_vs_pT->addPoint(x, y, ex, ey);
}
}
//@}
private:
/// @name Analysis data
//@{
/// Jet \f$ p_\perp\f$ bins.
vector<double> _ptedges; // This can't be a raw array if we want to initialise it non-painfully
/// JetShape projection name for each \f$p_\perp\f$ bin.
string _jsnames_pT[4];
//@}
/// @name Histograms
//@{
Profile1DPtr _h_Psi_pT[4];
Scatter2DPtr _h_OneMinusPsi_vs_pT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S7782535);
}
diff --git a/analyses/pluginCDF/CDF_2008_S8093652.cc b/analyses/pluginCDF/CDF_2008_S8093652.cc
--- a/analyses/pluginCDF/CDF_2008_S8093652.cc
+++ b/analyses/pluginCDF/CDF_2008_S8093652.cc
@@ -1,73 +1,71 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF dijet mass spectrum
class CDF_2008_S8093652 : public Analysis {
public:
/// Constructor
CDF_2008_S8093652()
: Analysis("CDF_2008_S8093652")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
FinalState fs;
FastJets conefinder(fs, FastJets::CDFMIDPOINT, 0.7);
declare(conefinder, "ConeFinder");
book(_h_m_dijet ,1, 1, 1);
}
/// Do the analysis
void analyze(const Event & e) {
- const double weight = 1.0;
-
const JetAlg& jetpro = apply<JetAlg>(e, "ConeFinder");
const Jets& jets = jetpro.jetsByPt();
if (jets.size() < 2) vetoEvent;
const FourMomentum j0(jets[0].momentum());
const FourMomentum j1(jets[1].momentum());
if (j1.absrap() > 1.0 || j0.absrap() > 1.0) {
vetoEvent;
}
double mjj = FourMomentum(j0+j1).mass();
- _h_m_dijet->fill(mjj, weight);
+ _h_m_dijet->fill(mjj);
}
/// Finalize
void finalize() {
scale(_h_m_dijet, crossSection()/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_m_dijet;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S8093652);
}
diff --git a/analyses/pluginCDF/CDF_2008_S8095620.cc b/analyses/pluginCDF/CDF_2008_S8095620.cc
--- a/analyses/pluginCDF/CDF_2008_S8095620.cc
+++ b/analyses/pluginCDF/CDF_2008_S8095620.cc
@@ -1,187 +1,187 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
namespace Rivet {
/// @brief CDF Run II Z + b-jet cross-section measurement
class CDF_2008_S8095620 : public Analysis {
public:
/// Constructor.
/// jet cuts: |eta| <= 1.5
CDF_2008_S8095620()
: Analysis("CDF_2008_S8095620"),
- _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(3.2),
- _sumWeightSelected(0.0)
- {
- }
+ _Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(3.2)
+ { }
/// @name Analysis methods
//@{
void init() {
// Set up projections
const FinalState fs(-3.2, 3.2);
declare(fs, "FS");
// Create a final state with any e+e- or mu+mu- pair with
// invariant mass 76 -> 106 GeV and ET > 18 (Z decay products)
vector<pair<PdgId,PdgId> > vids;
vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON));
vids.push_back(make_pair(PID::MUON, PID::ANTIMUON));
FinalState fs2(-3.2, 3.2);
InvMassFinalState invfs(fs2, vids, 76*GeV, 106*GeV);
declare(invfs, "INVFS");
// Make a final state without the Z decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets");
// Book histograms
book(_dStot ,1, 1, 1);
book(_dSdET ,2, 1, 1);
book(_dSdETA ,3, 1, 1);
book(_dSdZpT ,4, 1, 1);
book(_dSdNJet ,5, 1, 1);
book(_dSdNbJet ,6, 1, 1);
+
+ book(_sumWeightSelected,"sumWeightSelected");
}
// Do the analysis
void analyze(const Event& event) {
// Check we have an l+l- pair that passes the kinematic cuts
// Get the Z decay products (mu+mu- or e+e- pair)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& ZDecayProducts = invMassFinalState.particles();
// make sure we have 2 Z decay products (mumu or ee)
if (ZDecayProducts.size() < 2) vetoEvent;
//new cuts
double Lep1Pt = ZDecayProducts[0].perp();
double Lep2Pt = ZDecayProducts[1].perp();
double Lep1Eta = fabs(ZDecayProducts[0].rapidity());
double Lep2Eta = fabs(ZDecayProducts[1].rapidity());
if (Lep1Eta > _LepEtaCut || Lep2Eta > _LepEtaCut) vetoEvent;
if (ZDecayProducts[0].abspid()==13 &&
((Lep1Eta > 1.5 || Lep2Eta > 1.5) || (Lep1Eta > 1.0 && Lep2Eta > 1.0))) {
vetoEvent;
}
if (Lep1Pt > Lep2Pt) {
if (Lep1Pt < _Lep1PtCut || Lep2Pt < _Lep2PtCut) vetoEvent;
}
else {
if (Lep1Pt < _Lep2PtCut || Lep2Pt < _Lep1PtCut) vetoEvent;
}
- _sumWeightSelected += 1.0;
+ _sumWeightSelected->fill();
/// @todo: write out a warning if there are more than two decay products
FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum();
// Put all b-quarks in a vector
/// @todo Use a b-hadron search rather than b-quarks for tagging
Particles bquarks;
foreach (const GenParticle* p, particles(event.genEvent())) {
if (std::abs(p->pdg_id()) == PID::BQUARK) {
bquarks += Particle(*p);
}
}
// Get jets
const FastJets& jetpro = apply<FastJets>(event, "Jets");
MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size());
const PseudoJets& jets = jetpro.pseudoJetsByPt();
MSG_DEBUG("jetlist size = " << jets.size());
int numBJet = 0;
int numJet = 0;
// for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end
// for each event plot N jet and pT(Z), normalise to the total cross section at the end
for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) {
// select jets that pass the kinematic cuts
if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) {
numJet++;
// does the jet contain a b-quark?
bool bjet = false;
foreach (const Particle& bquark, bquarks) {
if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(),bquark.phi()) <= _Rjet) {
bjet = true;
break;
}
} // end loop around b-jets
if (bjet) {
numBJet++;
- _dSdET->fill(jt->perp(),1.0);
- _dSdETA->fill(fabs(jt->rapidity()),1.0);
+ _dSdET->fill(jt->perp());
+ _dSdETA->fill(fabs(jt->rapidity()));
}
}
} // end loop around jets
// wasn't asking for b-jets before!!!!
- if(numJet > 0 && numBJet > 0) _dSdNJet->fill(numJet,1.0);
+ if(numJet > 0 && numBJet > 0) _dSdNJet->fill(numJet);
if(numBJet > 0) {
- _dStot->fill(1960.0,1.0);
- _dSdNbJet->fill(numBJet,1.0);
- _dSdZpT->fill(Zmom.pT(),1.0);
+ _dStot->fill(1960.0);
+ _dSdNbJet->fill(numBJet);
+ _dSdZpT->fill(Zmom.pT());
}
}
// Finalize
void finalize() {
// normalise histograms
// scale by 1 / the sum-of-weights of events that pass the Z cuts
// since the cross sections are normalized to the inclusive
// Z cross sections.
double Scale = 1.0;
if (_sumWeightSelected != 0.0) Scale = 1.0/_sumWeightSelected;
scale(_dStot,Scale);
scale(_dSdET,Scale);
scale(_dSdETA,Scale);
scale(_dSdNJet,Scale);
scale(_dSdNbJet,Scale);
scale(_dSdZpT,Scale);
}
//@}
private:
double _Rjet;
double _JetPtCut;
double _JetEtaCut;
double _Lep1PtCut;
double _Lep2PtCut;
double _LepEtaCut;
- double _sumWeightSelected;
+ CounterPtr _sumWeightSelected;
//@{
/// Histograms
Histo1DPtr _dStot;
Histo1DPtr _dSdET;
Histo1DPtr _dSdETA;
Histo1DPtr _dSdNJet;
Histo1DPtr _dSdNbJet;
Histo1DPtr _dSdZpT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S8095620);
}
diff --git a/analyses/pluginCDF/CDF_2009_I856131.cc b/analyses/pluginCDF/CDF_2009_I856131.cc
--- a/analyses/pluginCDF/CDF_2009_I856131.cc
+++ b/analyses/pluginCDF/CDF_2009_I856131.cc
@@ -1,87 +1,86 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief CDF Z boson rapidity measurement
class CDF_2009_I856131 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_2009_I856131()
: Analysis("CDF_2009_I856131")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
/// Initialise and register projections here
// this seems to have been corrected completely for all selection cuts,
// i.e. eta cuts and pT cuts on leptons.
ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON,
66*GeV, 116*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder, "ZFinder");
/// Book histograms here
book(_h_xs ,1, 1, 1);
book(_h_yZ ,2, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size() == 1) {
- const double weight = 1.0;
- _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity()), weight);
- _h_xs->fill(1960, weight);
+ _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity()));
+ _h_xs->fill(1960);
} else {
MSG_DEBUG("no unique lepton pair found.");
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_xs, crossSection()/sumOfWeights());
// Data seems to have been normalized for the avg of the two sides
// (+ve & -ve rapidity) rather than the sum, hence the 0.5:
scale(_h_yZ, 0.5*crossSection()/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_yZ;
Histo1DPtr _h_xs;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2009_I856131);
}
diff --git a/analyses/pluginCDF/CDF_2009_NOTE_9936.cc b/analyses/pluginCDF/CDF_2009_NOTE_9936.cc
--- a/analyses/pluginCDF/CDF_2009_NOTE_9936.cc
+++ b/analyses/pluginCDF/CDF_2009_NOTE_9936.cc
@@ -1,72 +1,70 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun2.hh"
namespace Rivet {
class CDF_2009_NOTE_9936 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_2009_NOTE_9936()
: Analysis("CDF_2009_NOTE_9936")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(TriggerCDFRun2(), "Trigger");
declare(ChargedFinalState(-1.0, 1.0, 0.4*GeV), "CFS");
book(_hist_nch ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// MinBias Trigger
const bool trigger = apply<TriggerCDFRun2>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
- //_sumWPassed += 1.0;
- const double weight = 1.0;
// Get events charged multiplicity and fill histogram
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
- _hist_nch->fill(cfs.size(), weight);
+ _hist_nch->fill(cfs.size());
}
/// Normalise histograms etc., after the run
void finalize() {
normalize(_hist_nch);
}
//@}
private:
Histo1DPtr _hist_nch;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2009_NOTE_9936);
}
diff --git a/analyses/pluginCDF/CDF_2009_S8233977.cc b/analyses/pluginCDF/CDF_2009_S8233977.cc
--- a/analyses/pluginCDF/CDF_2009_S8233977.cc
+++ b/analyses/pluginCDF/CDF_2009_S8233977.cc
@@ -1,124 +1,122 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun2.hh"
namespace Rivet {
/// @brief CDF Run II min-bias cross-section
/// @author Hendrik Hoeth
///
/// Measurement of \f$ \langle p_T \rangle \f$ vs. \f$ n_\text{ch} \f$,
/// the track \f$ p_T \f$ distribution, and the \f$ \sum E_T \f$ distribution.
/// Particles are selected within |eta|<1 and with pT>0.4 GeV.
/// There is no pT cut for the \f$ \sum E_T \f$ measurement.
///
/// @par Run conditions
///
/// @arg \f$ \sqrt{s} = \f$ 1960 GeV
/// @arg Run with generic QCD events.
/// @arg Set particles with c*tau > 10 mm stable
class CDF_2009_S8233977 : public Analysis {
public:
/// Constructor
CDF_2009_S8233977()
- : Analysis("CDF_2009_S8233977"),
- _sumWeightSelected(0.0)
+ : Analysis("CDF_2009_S8233977")
{ }
/// @name Analysis methods
//@{
/// Book histograms and projections
void init() {
declare(TriggerCDFRun2(), "Trigger");
declare(FinalState(-1.0, 1.0, 0.0*GeV), "EtFS");
declare(ChargedFinalState(-1.0, 1.0, 0.4*GeV), "CFS");
book(_hist_pt ,1, 1, 1);
book(_hist_pt_vs_multiplicity ,2, 1, 1);
book(_hist_sumEt ,3, 1, 1);
+
+ book(_sumWeightSelected,"_sumWeightSelected");
}
/// Do the analysis
void analyze(const Event& evt) {
// MinBias Trigger
const bool trigger = apply<TriggerCDFRun2>(evt, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
- // Get the event weight
- const double weight = 1.0;
-
/// @todo The pT and sum(ET) distributions look slightly different from
/// Niccolo's Monte Carlo plots. Still waiting for his answer.
const ChargedFinalState& trackfs = apply<ChargedFinalState>(evt, "CFS");
const size_t numParticles = trackfs.size();
foreach (const Particle& p, trackfs.particles()) {
const double pT = p.pT() / GeV;
- _hist_pt_vs_multiplicity->fill(numParticles, pT, weight);
+ _hist_pt_vs_multiplicity->fill(numParticles, pT);
// The weight for entries in the pT distribution should be weight/(pT*dPhi*dy).
//
// - dPhi = 2*PI
//
// - dy depends on the pT: They calculate y assuming the particle has the
// pion mass and assuming that eta=1:
// dy = 2 * 1/2 * ln [(sqrt(m^2 + (a+1)*pT^2) + a*pT) / (sqrt(m^2 + (a+1)*pT^2) - a*pT)]
// with a = sinh(1).
//
// sinh(1) = 1.1752012
// m(charged pion)^2 = (139.57 MeV)^2 = 0.019479785 GeV^2
const double sinh1 = 1.1752012;
const double apT = sinh1 * pT;
const double mPi = 139.57*MeV;
const double root = sqrt(mPi*mPi + (1+sinh1)*pT*pT);
const double dy = std::log((root+apT)/(root-apT));
const double dphi = TWOPI;
- _hist_pt->fill(pT, weight/(pT*dphi*dy));
+ _hist_pt->fill(pT, 1.0/(pT*dphi*dy));
}
// Calc sum(Et) from calo particles
const FinalState& etfs = apply<FinalState>(evt, "EtFS");
double sumEt = 0.0;
foreach (const Particle& p, etfs.particles()) {
sumEt += p.Et();
}
- _hist_sumEt->fill(sumEt, weight);
- _sumWeightSelected += 1.0;
+ _hist_sumEt->fill(sumEt);
+ _sumWeightSelected->fill();
}
/// Normalize histos
void finalize() {
- scale(_hist_sumEt, crossSection()/millibarn/(4*M_PI*_sumWeightSelected));
+ scale(_hist_sumEt, crossSection()/millibarn/(4*M_PI*double(_sumWeightSelected)));
scale(_hist_pt, crossSection()/millibarn/_sumWeightSelected);
MSG_DEBUG("sumOfWeights() = " << sumOfWeights());
- MSG_DEBUG("_sumWeightSelected = " << _sumWeightSelected);
+ MSG_DEBUG("_sumWeightSelected = " << double(_sumWeightSelected));
}
//@}
private:
- double _sumWeightSelected;
+ CounterPtr _sumWeightSelected;
Profile1DPtr _hist_pt_vs_multiplicity;
Histo1DPtr _hist_pt;
Histo1DPtr _hist_sumEt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2009_S8233977);
}
diff --git a/analyses/pluginCDF/CDF_2009_S8436959.cc b/analyses/pluginCDF/CDF_2009_S8436959.cc
--- a/analyses/pluginCDF/CDF_2009_S8436959.cc
+++ b/analyses/pluginCDF/CDF_2009_S8436959.cc
@@ -1,89 +1,87 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
namespace Rivet {
/// @brief CDF inclusive isolated prompt photon cross-section
class CDF_2009_S8436959 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
CDF_2009_S8436959()
: Analysis("CDF_2009_S8436959")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs;
declare(fs, "FS");
LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV));
photonfs.addParticleId(PID::PHOTON);
declare(photonfs, "LeadingPhoton");
book(_h_Et_photon ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
Particles fs = apply<FinalState>(event, "FS").particles();
Particles photons = apply<LeadingParticlesFinalState>(event, "LeadingPhoton").particles();
if (photons.size()!=1) {
vetoEvent;
}
FourMomentum leadingPhoton = photons[0].momentum();
double eta_P = leadingPhoton.eta();
double phi_P = leadingPhoton.phi();
FourMomentum mom_in_cone;
foreach (const Particle& p, fs) {
if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) {
mom_in_cone += p.momentum();
}
}
if ( (mom_in_cone.Et() - leadingPhoton.Et()) > 2.0*GeV) {
vetoEvent;
}
- _h_Et_photon->fill(leadingPhoton.Et(), weight);
+ _h_Et_photon->fill(leadingPhoton.Et());
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_Et_photon, crossSection()/sumOfWeights()/2.0);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_Et_photon;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2009_S8436959);
}
diff --git a/analyses/pluginCDF/CDF_2010_S8591881_DY.cc b/analyses/pluginCDF/CDF_2010_S8591881_DY.cc
--- a/analyses/pluginCDF/CDF_2010_S8591881_DY.cc
+++ b/analyses/pluginCDF/CDF_2010_S8591881_DY.cc
@@ -1,212 +1,209 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/ChargedLeptons.hh"
namespace Rivet {
/// @brief CDF Run II underlying event in Drell-Yan
/// @author Hendrik Hoeth
///
/// Measurement of the underlying event in Drell-Yan
/// \f$ Z/\gamma^* \to e^+ e^- \f$ and
/// \f$ Z/\gamma^* \to \mu^+ \mu^- \f$ events. The reconstructed
/// Z defines the \f$ \phi \f$ orientation. A Z mass window cut is applied.
///
/// @par Run conditions
///
/// @arg \f$ \sqrt{s} = \f$ 1960 GeV
/// @arg produce Drell-Yan events
/// @arg Set particles with c*tau > 10 mm stable
/// @arg Z decay mode: Z -> e+e- and Z -> mu+mu-
/// @arg gamma decay mode: gamma -> e+e- and gamma -> mu+mu-
/// @arg minimum invariant mass of the fermion pair coming from the Z/gamma: 70 GeV
class CDF_2010_S8591881_DY : public Analysis {
public:
/// Constructor
CDF_2010_S8591881_DY() : Analysis("CDF_2010_S8591881_DY")
{
}
/// @name Analysis methods
//@{
void init() {
// Set up projections
const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV);
const ChargedFinalState clfs(-1.0, 1.0, 20*GeV);
declare(cfs, "FS");
declare(ChargedLeptons(clfs), "CL");
// Book histograms
book(_hist_tnchg , 1, 1, 1);
book(_hist_pnchg , 1, 1, 2);
book(_hist_anchg , 1, 1, 3);
book(_hist_pmaxnchg , 2, 1, 1);
book(_hist_pminnchg , 2, 1, 2);
book(_hist_pdifnchg , 2, 1, 3);
book(_hist_tcptsum , 3, 1, 1);
book(_hist_pcptsum , 3, 1, 2);
book(_hist_acptsum , 3, 1, 3);
book(_hist_pmaxcptsum , 4, 1, 1);
book(_hist_pmincptsum , 4, 1, 2);
book(_hist_pdifcptsum , 4, 1, 3);
book(_hist_tcptave , 5, 1, 1);
book(_hist_pcptave , 5, 1, 2);
book(_hist_tcptmax , 6, 1, 1);
book(_hist_pcptmax , 6, 1, 2);
book(_hist_zptvsnchg , 7, 1, 1);
book(_hist_cptavevsnchg , 8, 1, 1);
book(_hist_cptavevsnchgsmallzpt , 9, 1, 1);
}
/// Do the analysis
void analyze(const Event& e) {
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 1) {
MSG_DEBUG("Failed multiplicity cut");
vetoEvent;
}
- // Get the event weight
- const double weight = 1.0;
-
// Get the leptons
const Particles& leptons = apply<ChargedLeptons>(e, "CL").chargedLeptons();
// We want exactly two leptons of the same flavour.
MSG_DEBUG("lepton multiplicity = " << leptons.size());
if (leptons.size() != 2 || leptons[0].pid() != -leptons[1].pid() ) vetoEvent;
// Lepton pT > 20 GeV
if (leptons[0].pT()/GeV <= 20 || leptons[1].pT()/GeV <= 20) vetoEvent;
// Lepton pair should have an invariant mass between 70 and 110 and |eta| < 6
const FourMomentum dilepton = leptons[0].momentum() + leptons[1].momentum();
if (!inRange(dilepton.mass()/GeV, 70., 110.) || fabs(dilepton.eta()) >= 6) vetoEvent;
MSG_DEBUG("Dilepton mass = " << dilepton.mass()/GeV << " GeV");
MSG_DEBUG("Dilepton pT = " << dilepton.pT()/GeV << " GeV");
// Calculate the observables
size_t numToward(0), numAway(0);
long int numTrans1(0), numTrans2(0);
double ptSumToward(0.0), ptSumTrans1(0.0), ptSumTrans2(0.0), ptSumAway(0.0);
double ptMaxToward(0.0), ptMaxTrans1(0.0), ptMaxTrans2(0.0), ptMaxAway(0.0);
const double phiZ = dilepton.azimuthalAngle();
const double pTZ = dilepton.pT();
/// @todo Replace with foreach
for (Particles::const_iterator p = fs.particles().begin(); p != fs.particles().end(); ++p) {
// Don't use the leptons
/// @todo Replace with PID::isLepton
if (abs(p->pid()) < 20) continue;
const double dPhi = deltaPhi(p->momentum().phi(), phiZ);
const double pT = p->pT();
double rotatedphi = p->momentum().phi() - phiZ;
while (rotatedphi < 0) rotatedphi += 2*PI;
if (dPhi < PI/3.0) {
ptSumToward += pT;
++numToward;
if (pT > ptMaxToward)
ptMaxToward = pT;
} else if (dPhi < 2*PI/3.0) {
if (rotatedphi <= PI) {
ptSumTrans1 += pT;
++numTrans1;
if (pT > ptMaxTrans1)
ptMaxTrans1 = pT;
}
else {
ptSumTrans2 += pT;
++numTrans2;
if (pT > ptMaxTrans2)
ptMaxTrans2 = pT;
}
} else {
ptSumAway += pT;
++numAway;
if (pT > ptMaxAway)
ptMaxAway = pT;
}
// We need to subtract the two leptons from the number of particles to get the correct multiplicity
- _hist_cptavevsnchg->fill(numParticles-2, pT, weight);
+ _hist_cptavevsnchg->fill(numParticles-2, pT);
if (pTZ < 10)
- _hist_cptavevsnchgsmallzpt->fill(numParticles-2, pT, weight);
+ _hist_cptavevsnchgsmallzpt->fill(numParticles-2, pT);
}
// Fill the histograms
- _hist_tnchg->fill(pTZ, numToward/(4*PI/3), weight);
- _hist_pnchg->fill(pTZ, (numTrans1+numTrans2)/(4*PI/3), weight);
- _hist_pmaxnchg->fill(pTZ, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_pminnchg->fill(pTZ, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_pdifnchg->fill(pTZ, abs(numTrans1-numTrans2)/(2*PI/3), weight);
- _hist_anchg->fill(pTZ, numAway/(4*PI/3), weight);
+ _hist_tnchg->fill(pTZ, numToward/(4*PI/3));
+ _hist_pnchg->fill(pTZ, (numTrans1+numTrans2)/(4*PI/3));
+ _hist_pmaxnchg->fill(pTZ, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_pminnchg->fill(pTZ, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_pdifnchg->fill(pTZ, abs(numTrans1-numTrans2)/(2*PI/3));
+ _hist_anchg->fill(pTZ, numAway/(4*PI/3));
- _hist_tcptsum->fill(pTZ, ptSumToward/(4*PI/3), weight);
- _hist_pcptsum->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(4*PI/3), weight);
- _hist_pmaxcptsum->fill(pTZ, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/(2*PI/3), weight);
- _hist_pmincptsum->fill(pTZ, (ptSumTrans1<ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/(2*PI/3), weight);
- _hist_pdifcptsum->fill(pTZ, fabs(ptSumTrans1-ptSumTrans2)/(2*PI/3), weight);
- _hist_acptsum->fill(pTZ, ptSumAway/(4*PI/3), weight);
+ _hist_tcptsum->fill(pTZ, ptSumToward/(4*PI/3));
+ _hist_pcptsum->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(4*PI/3));
+ _hist_pmaxcptsum->fill(pTZ, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/(2*PI/3));
+ _hist_pmincptsum->fill(pTZ, (ptSumTrans1<ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/(2*PI/3));
+ _hist_pdifcptsum->fill(pTZ, fabs(ptSumTrans1-ptSumTrans2)/(2*PI/3));
+ _hist_acptsum->fill(pTZ, ptSumAway/(4*PI/3));
if (numToward > 0) {
- _hist_tcptave->fill(pTZ, ptSumToward/numToward, weight);
- _hist_tcptmax->fill(pTZ, ptMaxToward, weight);
+ _hist_tcptave->fill(pTZ, ptSumToward/numToward);
+ _hist_tcptmax->fill(pTZ, ptMaxToward);
}
if ((numTrans1+numTrans2) > 0) {
- _hist_pcptave->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(numTrans1+numTrans2), weight);
- _hist_pcptmax->fill(pTZ, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2), weight);
+ _hist_pcptave->fill(pTZ, (ptSumTrans1+ptSumTrans2)/(numTrans1+numTrans2));
+ _hist_pcptmax->fill(pTZ, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2));
}
// We need to subtract the two leptons from the number of particles to get the correct multiplicity
- _hist_zptvsnchg->fill(numParticles-2, pTZ, weight);
+ _hist_zptvsnchg->fill(numParticles-2, pTZ);
}
void finalize() {
}
//@}
private:
Profile1DPtr _hist_tnchg;
Profile1DPtr _hist_pnchg;
Profile1DPtr _hist_pmaxnchg;
Profile1DPtr _hist_pminnchg;
Profile1DPtr _hist_pdifnchg;
Profile1DPtr _hist_anchg;
Profile1DPtr _hist_tcptsum;
Profile1DPtr _hist_pcptsum;
Profile1DPtr _hist_pmaxcptsum;
Profile1DPtr _hist_pmincptsum;
Profile1DPtr _hist_pdifcptsum;
Profile1DPtr _hist_acptsum;
Profile1DPtr _hist_tcptave;
Profile1DPtr _hist_pcptave;
Profile1DPtr _hist_tcptmax;
Profile1DPtr _hist_pcptmax;
Profile1DPtr _hist_zptvsnchg;
Profile1DPtr _hist_cptavevsnchg;
Profile1DPtr _hist_cptavevsnchgsmallzpt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2010_S8591881_DY);
}
diff --git a/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc b/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc
--- a/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc
+++ b/analyses/pluginCDF/CDF_2010_S8591881_QCD.cc
@@ -1,193 +1,190 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief CDF Run II underlying event in leading jet events
/// @author Hendrik Hoeth
///
/// Rick Field's measurement of the underlying event in "leading jet" events.
/// The leading jet (CDF midpoint \f$ R = 0.7 \f$) must be within \f$|\eta| < 2 \f$
/// and defines the "toward" phi direction. Particles are selected in
/// \f$ |\eta| < 1 \f$. For the \f$ p_\perp \f$-related observables there
/// is a \f$ p_\perp > 0.5 \f$ GeV cut. For \f$ \sum E_\perp \f$ there is no
/// \f$ p_\perp \f$ cut.
///
/// @par Run conditions
/// @arg \f$ \sqrt{s} = \f$ 1960 GeV
/// @arg Run with generic QCD events.
/// @arg Set particles with c*tau > 10 mm stable
/// @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the profile histograms:
/// @arg \f$ p_\perp^\text{min} = \f$ 0 (min bias), 10, 20, 50, 100, 150 GeV
/// @arg The corresponding merging points are at \f$ p_T = \f$ 0, 30, 50, 80, 130, 180 GeV
class CDF_2010_S8591881_QCD : public Analysis {
public:
/// Constructor
CDF_2010_S8591881_QCD()
: Analysis("CDF_2010_S8591881_QCD")
{
}
/// @name Analysis methods
//@{
void init() {
// Final state for the jet finding
const FinalState fsj(-4.0, 4.0, 0.0*GeV);
declare(fsj, "FSJ");
declare(FastJets(fsj, FastJets::CDFMIDPOINT, 0.7), "MidpointJets");
// Charged final state for the distributions
const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV);
declare(cfs, "CFS");
// Book histograms
book(_hist_tnchg ,10, 1, 1);
book(_hist_pnchg ,10, 1, 2);
book(_hist_anchg ,10, 1, 3);
book(_hist_pmaxnchg ,11, 1, 1);
book(_hist_pminnchg ,11, 1, 2);
book(_hist_pdifnchg ,11, 1, 3);
book(_hist_tcptsum ,12, 1, 1);
book(_hist_pcptsum ,12, 1, 2);
book(_hist_acptsum ,12, 1, 3);
book(_hist_pmaxcptsum ,13, 1, 1);
book(_hist_pmincptsum ,13, 1, 2);
book(_hist_pdifcptsum ,13, 1, 3);
book(_hist_pcptave ,14, 1, 1);
book(_hist_pcptmax ,15, 1, 1);
}
// Do the analysis
void analyze(const Event& e) {
/// @todo Implement Run II min bias trigger cf. CDF_2009?
const FinalState& fsj = apply<FinalState>(e, "FSJ");
if (fsj.particles().size() < 1) {
MSG_DEBUG("Failed multiplicity cut");
vetoEvent;
}
const Jets& jets = apply<FastJets>(e, "MidpointJets").jetsByPt();
MSG_DEBUG("Jet multiplicity = " << jets.size());
// We require the leading jet to be within |eta|<2
if (jets.size() < 1 || fabs(jets[0].eta()) >= 2) {
MSG_DEBUG("Failed leading jet cut");
vetoEvent;
}
const double jetphi = jets[0].phi();
const double jeteta = jets[0].eta();
const double jetpT = jets[0].pT();
MSG_DEBUG("Leading jet: pT = " << jetpT
<< ", eta = " << jeteta << ", phi = " << jetphi);
- // Get the event weight
- const double weight = 1.0;
-
// Get the final states to work with for filling the distributions
const FinalState& cfs = apply<ChargedFinalState>(e, "CFS");
size_t numOverall(0), numToward(0), numAway(0) ;
long int numTrans1(0), numTrans2(0);
double ptSumOverall(0.0), ptSumToward(0.0), ptSumTrans1(0.0), ptSumTrans2(0.0), ptSumAway(0.0);
double ptMaxOverall(0.0), ptMaxToward(0.0), ptMaxTrans1(0.0), ptMaxTrans2(0.0), ptMaxAway(0.0);
// Calculate all the charged stuff
foreach (const Particle& p, cfs.particles()) {
const double dPhi = deltaPhi(p.phi(), jetphi);
const double pT = p.pT();
const double phi = p.phi();
double rotatedphi = phi - jetphi;
while (rotatedphi < 0) rotatedphi += 2*PI;
ptSumOverall += pT;
++numOverall;
if (pT > ptMaxOverall) {
ptMaxOverall = pT;
}
if (dPhi < PI/3.0) {
ptSumToward += pT;
++numToward;
if (pT > ptMaxToward) ptMaxToward = pT;
}
else if (dPhi < 2*PI/3.0) {
if (rotatedphi <= PI) {
ptSumTrans1 += pT;
++numTrans1;
if (pT > ptMaxTrans1) ptMaxTrans1 = pT;
} else {
ptSumTrans2 += pT;
++numTrans2;
if (pT > ptMaxTrans2) ptMaxTrans2 = pT;
}
}
else {
ptSumAway += pT;
++numAway;
if (pT > ptMaxAway) ptMaxAway = pT;
}
} // end charged particle loop
// Fill the histograms
- _hist_tnchg->fill(jetpT/GeV, numToward/(4*PI/3), weight);
- _hist_pnchg->fill(jetpT/GeV, (numTrans1+numTrans2)/(4*PI/3), weight);
- _hist_pmaxnchg->fill(jetpT/GeV, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_pminnchg->fill(jetpT/GeV, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_pdifnchg->fill(jetpT/GeV, abs(numTrans1-numTrans2)/(2*PI/3), weight);
- _hist_anchg->fill(jetpT/GeV, numAway/(4*PI/3), weight);
+ _hist_tnchg->fill(jetpT/GeV, numToward/(4*PI/3));
+ _hist_pnchg->fill(jetpT/GeV, (numTrans1+numTrans2)/(4*PI/3));
+ _hist_pmaxnchg->fill(jetpT/GeV, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_pminnchg->fill(jetpT/GeV, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_pdifnchg->fill(jetpT/GeV, abs(numTrans1-numTrans2)/(2*PI/3));
+ _hist_anchg->fill(jetpT/GeV, numAway/(4*PI/3));
- _hist_tcptsum->fill(jetpT/GeV, ptSumToward/GeV/(4*PI/3), weight);
- _hist_pcptsum->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(4*PI/3), weight);
- _hist_pmaxcptsum->fill(jetpT/GeV, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3), weight);
- _hist_pmincptsum->fill(jetpT/GeV, (ptSumTrans1<ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3), weight);
- _hist_pdifcptsum->fill(jetpT/GeV, fabs(ptSumTrans1-ptSumTrans2)/GeV/(2*PI/3), weight);
- _hist_acptsum->fill(jetpT/GeV, ptSumAway/GeV/(4*PI/3), weight);
+ _hist_tcptsum->fill(jetpT/GeV, ptSumToward/GeV/(4*PI/3));
+ _hist_pcptsum->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(4*PI/3));
+ _hist_pmaxcptsum->fill(jetpT/GeV, (ptSumTrans1>ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3));
+ _hist_pmincptsum->fill(jetpT/GeV, (ptSumTrans1<ptSumTrans2 ? ptSumTrans1 : ptSumTrans2)/GeV/(2*PI/3));
+ _hist_pdifcptsum->fill(jetpT/GeV, fabs(ptSumTrans1-ptSumTrans2)/GeV/(2*PI/3));
+ _hist_acptsum->fill(jetpT/GeV, ptSumAway/GeV/(4*PI/3));
if ((numTrans1+numTrans2) > 0) {
- _hist_pcptave->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(numTrans1+numTrans2), weight);
- _hist_pcptmax->fill(jetpT/GeV, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2)/GeV, weight);
+ _hist_pcptave->fill(jetpT/GeV, (ptSumTrans1+ptSumTrans2)/GeV/(numTrans1+numTrans2));
+ _hist_pcptmax->fill(jetpT/GeV, (ptMaxTrans1 > ptMaxTrans2 ? ptMaxTrans1 : ptMaxTrans2)/GeV);
}
}
void finalize() {
}
//@}
private:
Profile1DPtr _hist_tnchg;
Profile1DPtr _hist_pnchg;
Profile1DPtr _hist_anchg;
Profile1DPtr _hist_pmaxnchg;
Profile1DPtr _hist_pminnchg;
Profile1DPtr _hist_pdifnchg;
Profile1DPtr _hist_tcptsum;
Profile1DPtr _hist_pcptsum;
Profile1DPtr _hist_acptsum;
Profile1DPtr _hist_pmaxcptsum;
Profile1DPtr _hist_pmincptsum;
Profile1DPtr _hist_pdifcptsum;
Profile1DPtr _hist_pcptave;
Profile1DPtr _hist_pcptmax;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2010_S8591881_QCD);
}
diff --git a/analyses/pluginCDF/CDF_2012_I1124333.cc b/analyses/pluginCDF/CDF_2012_I1124333.cc
--- a/analyses/pluginCDF/CDF_2012_I1124333.cc
+++ b/analyses/pluginCDF/CDF_2012_I1124333.cc
@@ -1,84 +1,82 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @ CDF Run II Z \f$ p_\perp \f$ in Drell-Yan events
/// @author Simone Amoroso
class CDF_2012_I1124333 : public Analysis {
public:
/// Constructor
CDF_2012_I1124333()
: Analysis("CDF_2012_I1124333")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
/// Initialise and register projections here
ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 66*GeV, 116*GeV, 0.0, ZFinder::NOCLUSTER);
declare(zfinder, "ZFinder");
/// Book histograms here, e.g.:
//book( _hist_z_xs ,1, 1, 1);
book(_hist_zpt ,2, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
/// @todo Do the event by event analysis here
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size() != 1) {
MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size());
vetoEvent;
}
const FourMomentum& pZ = zfinder.bosons()[0].momentum();
if (pZ.mass2() < 0) {
MSG_DEBUG("Negative Z mass**2 = " << pZ.mass2()/GeV2 << "!");
vetoEvent;
}
MSG_DEBUG("Dilepton mass = " << pZ.mass()/GeV << " GeV");
- _hist_zpt->fill(pZ.pT(), weight);
- // _hist_z_xs->fill(1, weight);
+ _hist_zpt->fill(pZ.pT());
+ // _hist_z_xs->fill(1);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_hist_zpt, crossSection()/picobarn/sumOfWeights());
}
//@}
private:
// Data members like post-cuts event weight counters go here
/// @name Histograms
//@{
Histo1DPtr _hist_zpt;
// Histo1DPtr _hist_z_xs;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2012_I1124333);
}
diff --git a/analyses/pluginCDF/CDF_2012_NOTE10874.cc b/analyses/pluginCDF/CDF_2012_NOTE10874.cc
--- a/analyses/pluginCDF/CDF_2012_NOTE10874.cc
+++ b/analyses/pluginCDF/CDF_2012_NOTE10874.cc
@@ -1,94 +1,92 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class CDF_2012_NOTE10874 : public Analysis {
public:
CDF_2012_NOTE10874()
: Analysis("CDF_2012_NOTE10874")
{}
public:
void init() {
const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV);
declare(cfs, "CFS");
int isqrts = -1;
if (fuzzyEquals(sqrtS(), 300*GeV)) isqrts = 1;
else if (fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 2;
else if (fuzzyEquals(sqrtS(), 1960*GeV)) isqrts = 3;
assert(isqrts >= 0);
book(_h_nch_transverse ,1,1,isqrts);
book(_h_ptSumDen ,2,1,isqrts);
book(_h_avePt ,3,1,isqrts);
}
// Little helper function to identify Delta(phi) regions
inline int region_index(double dphi) {
assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED));
if (dphi < PI/3.0) return 0;
if (dphi < 2*PI/3.0) return 1;
return 2;
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
if (cfs.size() < 1) {
vetoEvent;
}
Particles particles = cfs.particlesByPt();
Particle p_lead = particles[0];
const double philead = p_lead.phi();
const double pTlead = p_lead.pT();
int tNch = 0;
double ptSum = 0.0;
foreach (const Particle& p, particles) {
const double pT = p.pT();
const double dPhi = deltaPhi(philead, p.phi());
const int ir = region_index(dPhi);
if (ir==1) {
tNch++;
ptSum += pT;
}
}
const double dEtadPhi = 4.0*PI/3.0;
- _h_nch_transverse->fill(pTlead/GeV, tNch/dEtadPhi, weight);
- _h_ptSumDen->fill(pTlead/GeV, ptSum/dEtadPhi, weight);
+ _h_nch_transverse->fill(pTlead/GeV, tNch/dEtadPhi);
+ _h_ptSumDen->fill(pTlead/GeV, ptSum/dEtadPhi);
if (tNch > 0) {
- _h_avePt->fill(pTlead/GeV, ptSum/tNch, weight);
+ _h_avePt->fill(pTlead/GeV, ptSum/tNch);
}
}
void finalize() {
}
private:
Profile1DPtr _h_nch_transverse, _h_ptSumDen, _h_avePt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2012_NOTE10874);
}
diff --git a/analyses/pluginCDF/CDF_2015_1388868.cc b/analyses/pluginCDF/CDF_2015_I1388868.cc
rename from analyses/pluginCDF/CDF_2015_1388868.cc
rename to analyses/pluginCDF/CDF_2015_I1388868.cc
--- a/analyses/pluginCDF/CDF_2015_1388868.cc
+++ b/analyses/pluginCDF/CDF_2015_I1388868.cc
@@ -1,129 +1,128 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief CDF leading track underlying event at 300, 900 and 1960 GeV
/// @author Orestes Tumbarell Aranda (Havana), Hannes Jung (DESY)
class CDF_2015_I1388868 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(CDF_2015_I1388868);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Energy selection
double isqrts = 0;
if (fuzzyEquals(sqrtS()/GeV, 300, 1E-3)) {
isqrts = 3;
} else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
isqrts = 2;
} else if (fuzzyEquals(sqrtS()/GeV, 1960, 1E-3)) {
isqrts = 1;
} else {
throw UserError("Unexpected sqrtS ! Only 300, 900, 1960 GeV is supported by CDF_2015_I1388868");
}
MSG_DEBUG("CDF Tevatron UE: running with " << sqrtS()/GeV);
// Book projection
const ChargedFinalState cfs(Cuts::abseta < 0.8 && Cuts::pT > 0.5*GeV);
declare(cfs, "Tracks");
// Book profile histos
book(_NchgPDFden1 ,isqrts,1,1);
book(_NchgPMNden1 ,isqrts,1,2);
book(_NchgPMXden1 ,isqrts,1,3);
book(_NchgPden1 ,isqrts,1,4);
book(_PTsumPDFden1,isqrts,1,6);
book(_PTsumPMNden1,isqrts,1,7);
book(_PTsumPMXden1,isqrts,1,8);
book(_PTsumPden1 ,isqrts,1,9);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Require at least one track in the event with pT >= 0.5 GeV
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "Tracks");
if (cfs.empty()) vetoEvent;
const Particles trks = cfs.particlesByPt();
// Get lead track
const Particle p_lead = trks[0];
const double philead = p_lead.phi();
const double ptlead = p_lead.pT();
// Loop over tracks and compute variables
double NchgP1 = 0, NchgP2 = 0, PTsumP1 = 0, PTsumP2 = 0;
for (const Particle& p : trks) {
// Region definition -- if not in transverse region, ignore
const double dphi = mapAngle0To2Pi(p.phi() - philead);
if (!inRange(dphi, PI/3, 2*PI/3) && !inRange(dphi, 4*PI/3, 5*PI/3)) continue;
// Transverse region 1
if (inRange(dphi, PI/3, 2*PI/3)) {
NchgP1 += 1;
PTsumP1 += p.pT();
}
// Transverse region 2
else if (inRange(dphi, 4*PI/3, 5*PI/3)) {
NchgP2 += 1;
PTsumP2 += p.pT();
}
}
// Calculate total variables
const double NchgPtot = (NchgP1 + NchgP2)/2;
const double NchgPmax = max(NchgP1,NchgP2);
const double NchgPmin = min(NchgP1,NchgP2);
const double PTsumPtot = (PTsumP1 + PTsumP2)/2;
const double PTsumPmax = max(PTsumP1,PTsumP2);
const double PTsumPmin = min(PTsumP1,PTsumP2);
//
const double PTsumPMXden = PTsumPmax/AREA;
const double PTsumPMNden = PTsumPmin/AREA;
const double NchgPMXden = NchgPmax/AREA;
const double NchgPMNden = NchgPmin/AREA;
//
const double NchgPDFden = NchgPMXden - NchgPMNden;
const double PTsumPDFden = PTsumPMXden - PTsumPMNden;
// Fill histograms
- const double weight = 1.0;
- _NchgPden1 ->fill(ptlead/GeV, NchgPtot/AREA, weight );
- _NchgPMXden1->fill(ptlead/GeV, NchgPmax/AREA, weight );
- _NchgPMNden1->fill(ptlead/GeV, NchgPmin/AREA, weight );
- _NchgPDFden1->fill(ptlead/GeV, NchgPDFden , weight );
- _PTsumPden1 ->fill(ptlead/GeV, PTsumPtot/AREA, weight );
- _PTsumPMXden1->fill(ptlead/GeV, PTsumPmax/AREA, weight );
- _PTsumPMNden1->fill(ptlead/GeV, PTsumPmin/AREA, weight );
- _PTsumPDFden1->fill(ptlead/GeV, PTsumPDFden , weight );
+ _NchgPden1 ->fill(ptlead/GeV, NchgPtot/AREA);
+ _NchgPMXden1->fill(ptlead/GeV, NchgPmax/AREA);
+ _NchgPMNden1->fill(ptlead/GeV, NchgPmin/AREA);
+ _NchgPDFden1->fill(ptlead/GeV, NchgPDFden );
+ _PTsumPden1 ->fill(ptlead/GeV, PTsumPtot/AREA);
+ _PTsumPMXden1->fill(ptlead/GeV, PTsumPmax/AREA);
+ _PTsumPMNden1->fill(ptlead/GeV, PTsumPmin/AREA);
+ _PTsumPDFden1->fill(ptlead/GeV, PTsumPDFden );
}
//@}
/// eta-phi area of the transverse region
constexpr static double AREA = 2*0.8 * M_PI/3;
/// Histograms
Profile1DPtr _NchgPden1, _NchgPMXden1,_NchgPMNden1,_NchgPDFden1,_PTsumPden1,_PTsumPMXden1,_PTsumPMNden1,_PTsumPDFden1;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2015_I1388868);
}
diff --git a/analyses/pluginCDF/CDF_2015_1388868.info b/analyses/pluginCDF/CDF_2015_I1388868.info
rename from analyses/pluginCDF/CDF_2015_1388868.info
rename to analyses/pluginCDF/CDF_2015_I1388868.info
diff --git a/analyses/pluginCDF/CDF_2015_1388868.plot b/analyses/pluginCDF/CDF_2015_I1388868.plot
rename from analyses/pluginCDF/CDF_2015_1388868.plot
rename to analyses/pluginCDF/CDF_2015_I1388868.plot
diff --git a/analyses/pluginCDF/CDF_2015_1388868.yoda b/analyses/pluginCDF/CDF_2015_I1388868.yoda
rename from analyses/pluginCDF/CDF_2015_1388868.yoda
rename to analyses/pluginCDF/CDF_2015_I1388868.yoda
diff --git a/analyses/pluginD0/D0_1995_I398175.cc b/analyses/pluginD0/D0_1995_I398175.cc
--- a/analyses/pluginD0/D0_1995_I398175.cc
+++ b/analyses/pluginD0/D0_1995_I398175.cc
@@ -1,137 +1,136 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/JetShape.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 Run-1 jet shapes measurement
class D0_1995_I398175 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(D0_1995_I398175);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
const FinalState fs(-4.0, 4.0);
declare(fs, "FS");
// FastJets jets(fs, FastJets::ANTIKT, 0.6);
FastJets jets(fs, FastJets::D0ILCONE, 1.0);
jets.useInvisibles();
declare(jets, "Jets");
// Specify jets pT bins
_ptedges = {{ 45.0, 70.0, 105.0, 140.0, 1800.0}};
// Book histograms
for (size_t ptbin = 0; ptbin < 4; ++ptbin) {
_jsnames_pT[ptbin] = "JetShape" + to_str(ptbin) ;
const JetShape jsp(jets, 0.0, 1.0, 10, _ptedges[ptbin], _ptedges[ptbin+1], 0.0, 0.2, PSEUDORAPIDITY);
declare(jsp, _jsnames_pT[ptbin]);
book( _h_Rho_pT_central[ptbin] ,ptbin+1, 1, 1);
}
const JetShape jspfwd0(jets, 0.0, 1.0, 10, 45, 70, 2.5, 3.5, PSEUDORAPIDITY);
declare(jspfwd0, "JetShapeFwd0");
const JetShape jspfwd1(jets, 0.0, 1.0, 10, 70, 105, 2.5, 3.5, PSEUDORAPIDITY);
declare(jspfwd1, "JetShapeFwd1");
book( _h_Rho_pT_forward[0] ,5, 1, 1);
book( _h_Rho_pT_forward[1] ,6, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get jets and require at least one to pass pT and y cuts
const Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::ptIn(_ptedges.front()*GeV, _ptedges.back()*GeV) );
MSG_DEBUG("Selecting jets with pT> "<<_ptedges.front());
MSG_DEBUG("Jet multiplicity before cuts = " << jets.size());
if (jets.size() == 0){
MSG_DEBUG("No jets found in required pT and rapidity range");
vetoEvent;
}
- const double weight = 1.0;
// Calculate and histogram jet shapes
for (size_t ipt = 0; ipt < 4; ++ipt) {
const JetShape& jsipt = apply<JetShape>(event, _jsnames_pT[ipt]);
for (size_t ijet = 0; ijet < jsipt.numJets(); ++ijet) {
for (size_t rbin = 0; rbin < jsipt.numBins(); ++rbin) {
const double r_rho = jsipt.rBinMid(rbin);
MSG_DEBUG(ipt << " " << rbin << " (" << r_rho << ") " << jsipt.diffJetShape(ijet, rbin));
/// @note Bin width Jacobian factor of 0.7/0.1 = 7 in the differential shapes plot
- // _profhistRho_pT[ipt]->fill(r_rho/0.7, (0.7/0.1)*jsipt.diffJetShape(ijet, rbin), weight);
+ // _profhistRho_pT[ipt]->fill(r_rho/0.7, (0.7/0.1)*jsipt.diffJetShape(ijet, rbin));
const double r_Psi = jsipt.rBinMax(rbin);
MSG_DEBUG(ipt << " " << rbin << " (" << r_rho << ") " << jsipt.intJetShape(ijet, rbin));
- _h_Rho_pT_central[ipt]->fill(r_Psi/1.0, jsipt.intJetShape(ijet, rbin), weight);
+ _h_Rho_pT_central[ipt]->fill(r_Psi/1.0, jsipt.intJetShape(ijet, rbin));
}
}
}
const JetShape& jsiptfwd0 = apply<JetShape>(event, "JetShapeFwd0");
for (size_t ijet = 0; ijet < jsiptfwd0.numJets(); ++ijet) {
for (size_t rbin = 0; rbin < jsiptfwd0.numBins(); ++rbin) {
const double r_Psi = jsiptfwd0.rBinMax(rbin);
- _h_Rho_pT_forward[0]->fill(r_Psi/1.0, jsiptfwd0.intJetShape(ijet, rbin), weight);
+ _h_Rho_pT_forward[0]->fill(r_Psi/1.0, jsiptfwd0.intJetShape(ijet, rbin));
}
}
const JetShape& jsiptfwd1 = apply<JetShape>(event, "JetShapeFwd1");
for (size_t ijet = 0; ijet < jsiptfwd1.numJets(); ++ijet) {
for (size_t rbin = 0; rbin < jsiptfwd1.numBins(); ++rbin) {
const double r_Psi = jsiptfwd1.rBinMax(rbin);
- _h_Rho_pT_forward[1]->fill(r_Psi/1.0, jsiptfwd1.intJetShape(ijet, rbin), weight);
+ _h_Rho_pT_forward[1]->fill(r_Psi/1.0, jsiptfwd1.intJetShape(ijet, rbin));
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
// scale(_h_YYYY, crossSection()/sumOfWeights()); // norm to cross section
// normalize(_h_YYYY); // normalize to unity
}
//@}
private:
vector<double> _ptedges;
string _jsnames_pT[4];
/// @name Histograms
//@{
Profile1DPtr _h_Rho_pT_central[4];
Profile1DPtr _h_Rho_pT_forward[2];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_1995_I398175);
}
diff --git a/analyses/pluginD0/D0_1996_S3214044.cc b/analyses/pluginD0/D0_1996_S3214044.cc
--- a/analyses/pluginD0/D0_1996_S3214044.cc
+++ b/analyses/pluginD0/D0_1996_S3214044.cc
@@ -1,267 +1,265 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Math/LorentzTrans.hh"
namespace Rivet {
/// @brief D0 topological distributions of 3- and 4-jet events.
class D0_1996_S3214044 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
D0_1996_S3214044() : Analysis("D0_1996_S3214044")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
const FinalState fs;
declare(fs, "FS");
/// @todo Use correct jet algorithm --- tried FJ3 D0RunICone but does
// not look as good as the Run2 cone alg used here
declare(FastJets(fs, FastJets::D0ILCONE, 0.7), "ConeJets");
book(_h_3j_x3 ,1, 1, 1);
book(_h_3j_x5 ,2, 1, 1);
book(_h_3j_costheta3 ,3, 1, 1);
book(_h_3j_psi ,4, 1, 1);
book(_h_3j_mu34 ,5, 1, 1);
book(_h_3j_mu35 ,6, 1, 1);
book(_h_3j_mu45 ,7, 1, 1);
book(_h_4j_x3 ,8, 1, 1);
book(_h_4j_x4 ,9, 1, 1);
book(_h_4j_x5 ,10, 1, 1);
book(_h_4j_x6 ,11, 1, 1);
book(_h_4j_costheta3 ,12, 1, 1);
book(_h_4j_costheta4 ,13, 1, 1);
book(_h_4j_costheta5 ,14, 1, 1);
book(_h_4j_costheta6 ,15, 1, 1);
book(_h_4j_cosomega34 ,16, 1, 1);
book(_h_4j_cosomega35 ,17, 1, 1);
book(_h_4j_cosomega36 ,18, 1, 1);
book(_h_4j_cosomega45 ,19, 1, 1);
book(_h_4j_cosomega46 ,20, 1, 1);
book(_h_4j_cosomega56 ,21, 1, 1);
book(_h_4j_mu34 ,22, 1, 1);
book(_h_4j_mu35 ,23, 1, 1);
book(_h_4j_mu36 ,24, 1, 1);
book(_h_4j_mu45 ,25, 1, 1);
book(_h_4j_mu46 ,26, 1, 1);
book(_h_4j_mu56 ,27, 1, 1);
book(_h_4j_theta_BZ ,28, 1, 1);
book(_h_4j_costheta_NR ,29, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
Jets jets_in = apply<FastJets>(event, "ConeJets")
.jets(Cuts::Et > 20*GeV && Cuts::abseta < 3, cmpMomByEt);
Jets jets_isolated;
for (size_t i = 0; i < jets_in.size(); ++i) {
bool isolated = true;
for (size_t j = 0; j < jets_in.size(); ++j) {
if (i != j && deltaR(jets_in[i], jets_in[j]) < 1.4) {
isolated = false;
break;
}
}
if (isolated) jets_isolated.push_back(jets_in[i]);
}
if (jets_isolated.size() == 0 || jets_isolated[0].Et() < 60.0*GeV) vetoEvent;
- if (jets_isolated.size() > 2) _threeJetAnalysis(jets_isolated, weight);
- if (jets_isolated.size() > 3) _fourJetAnalysis(jets_isolated, weight);
+ if (jets_isolated.size() > 2) _threeJetAnalysis(jets_isolated);
+ if (jets_isolated.size() > 3) _fourJetAnalysis(jets_isolated);
}
void finalize() {
normalize(_h_3j_x3, 1.0);
normalize(_h_3j_x5, 1.0);
normalize(_h_3j_costheta3, 1.0);
normalize(_h_3j_psi, 1.0);
normalize(_h_3j_mu34, 1.0);
normalize(_h_3j_mu35, 1.0);
normalize(_h_3j_mu45, 1.0);
normalize(_h_4j_x3, 1.0);
normalize(_h_4j_x4, 1.0);
normalize(_h_4j_x5, 1.0);
normalize(_h_4j_x6, 1.0);
normalize(_h_4j_costheta3, 1.0);
normalize(_h_4j_costheta4, 1.0);
normalize(_h_4j_costheta5, 1.0);
normalize(_h_4j_costheta6, 1.0);
normalize(_h_4j_cosomega34, 1.0);
normalize(_h_4j_cosomega35, 1.0);
normalize(_h_4j_cosomega36, 1.0);
normalize(_h_4j_cosomega45, 1.0);
normalize(_h_4j_cosomega46, 1.0);
normalize(_h_4j_cosomega56, 1.0);
normalize(_h_4j_mu34, 1.0);
normalize(_h_4j_mu35, 1.0);
normalize(_h_4j_mu36, 1.0);
normalize(_h_4j_mu45, 1.0);
normalize(_h_4j_mu46, 1.0);
normalize(_h_4j_mu56, 1.0);
normalize(_h_4j_theta_BZ, 1.0);
normalize(_h_4j_costheta_NR, 1.0);
}
//@}
private:
/// @name Helper functions
//@{
- void _threeJetAnalysis(const Jets& jets, const double& weight) {
+ void _threeJetAnalysis(const Jets& jets) {
// >=3 jet events
FourMomentum jjj(jets[0].momentum()+jets[1].momentum()+jets[2].momentum());
const double sqrts = _safeMass(jjj);
if (sqrts<200*GeV) {
return;
}
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jjj.betaVec());
vector<FourMomentum> jets_boosted;
foreach (Jet jet, jets) {
jets_boosted.push_back(cms_boost.transform(jet.momentum()));
}
std::sort(jets_boosted.begin(), jets_boosted.end(), FourMomentum::byEDescending());
FourMomentum p3(jets_boosted[0]);
FourMomentum p4(jets_boosted[1]);
FourMomentum p5(jets_boosted[2]);
Vector3 beam1(0.0, 0.0, 1.0);
Vector3 p1xp3 = beam1.cross(p3.p3());
Vector3 p4xp5 = p4.p3().cross(p5.p3());
const double cospsi = p1xp3.dot(p4xp5)/p1xp3.mod()/p4xp5.mod();
- _h_3j_x3->fill(2.0*p3.E()/sqrts, weight);
- _h_3j_x5->fill(2.0*p5.E()/sqrts, weight);
- _h_3j_costheta3->fill(fabs(cos(p3.theta())), weight);
- _h_3j_psi->fill(acos(cospsi)/degree, weight);
- _h_3j_mu34->fill(_safeMass(FourMomentum(p3+p4))/sqrts, weight);
- _h_3j_mu35->fill(_safeMass(FourMomentum(p3+p5))/sqrts, weight);
- _h_3j_mu45->fill(_safeMass(FourMomentum(p4+p5))/sqrts, weight);
+ _h_3j_x3->fill(2.0*p3.E()/sqrts);
+ _h_3j_x5->fill(2.0*p5.E()/sqrts);
+ _h_3j_costheta3->fill(fabs(cos(p3.theta())));
+ _h_3j_psi->fill(acos(cospsi)/degree);
+ _h_3j_mu34->fill(_safeMass(FourMomentum(p3+p4))/sqrts);
+ _h_3j_mu35->fill(_safeMass(FourMomentum(p3+p5))/sqrts);
+ _h_3j_mu45->fill(_safeMass(FourMomentum(p4+p5))/sqrts);
}
- void _fourJetAnalysis(const Jets& jets, const double& weight) {
+ void _fourJetAnalysis(const Jets& jets) {
// >=4 jet events
FourMomentum jjjj(jets[0].momentum() + jets[1].momentum() + jets[2].momentum()+ jets[3].momentum());
const double sqrts = _safeMass(jjjj);
if (sqrts < 200*GeV) return;
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(jjjj.betaVec());
vector<FourMomentum> jets_boosted;
foreach (Jet jet, jets) {
jets_boosted.push_back(cms_boost.transform(jet.momentum()));
}
sort(jets_boosted.begin(), jets_boosted.end(), FourMomentum::byEDescending());
FourMomentum p3(jets_boosted[0]);
FourMomentum p4(jets_boosted[1]);
FourMomentum p5(jets_boosted[2]);
FourMomentum p6(jets_boosted[3]);
Vector3 p3xp4 = p3.p3().cross(p4.p3());
Vector3 p5xp6 = p5.p3().cross(p6.p3());
const double costheta_BZ = p3xp4.dot(p5xp6)/p3xp4.mod()/p5xp6.mod();
const double costheta_NR = (p3.p3()-p4.p3()).dot(p5.p3()-p6.p3())/
(p3.p3()-p4.p3()).mod()/(p5.p3()-p6.p3()).mod();
- _h_4j_x3->fill(2.0*p3.E()/sqrts, weight);
- _h_4j_x4->fill(2.0*p4.E()/sqrts, weight);
- _h_4j_x5->fill(2.0*p5.E()/sqrts, weight);
- _h_4j_x6->fill(2.0*p6.E()/sqrts, weight);
- _h_4j_costheta3->fill(fabs(cos(p3.theta())), weight);
- _h_4j_costheta4->fill(fabs(cos(p4.theta())), weight);
- _h_4j_costheta5->fill(fabs(cos(p5.theta())), weight);
- _h_4j_costheta6->fill(fabs(cos(p6.theta())), weight);
- _h_4j_cosomega34->fill(cos(p3.angle(p4)), weight);
- _h_4j_cosomega35->fill(cos(p3.angle(p5)), weight);
- _h_4j_cosomega36->fill(cos(p3.angle(p6)), weight);
- _h_4j_cosomega45->fill(cos(p4.angle(p5)), weight);
- _h_4j_cosomega46->fill(cos(p4.angle(p6)), weight);
- _h_4j_cosomega56->fill(cos(p5.angle(p6)), weight);
- _h_4j_mu34->fill(_safeMass(FourMomentum(p3+p4))/sqrts, weight);
- _h_4j_mu35->fill(_safeMass(FourMomentum(p3+p5))/sqrts, weight);
- _h_4j_mu36->fill(_safeMass(FourMomentum(p3+p6))/sqrts, weight);
- _h_4j_mu45->fill(_safeMass(FourMomentum(p4+p5))/sqrts, weight);
- _h_4j_mu46->fill(_safeMass(FourMomentum(p4+p6))/sqrts, weight);
- _h_4j_mu56->fill(_safeMass(FourMomentum(p5+p6))/sqrts, weight);
- _h_4j_theta_BZ->fill(acos(fabs(costheta_BZ))/degree, weight);
- _h_4j_costheta_NR->fill(fabs(costheta_NR), weight);
+ _h_4j_x3->fill(2.0*p3.E()/sqrts);
+ _h_4j_x4->fill(2.0*p4.E()/sqrts);
+ _h_4j_x5->fill(2.0*p5.E()/sqrts);
+ _h_4j_x6->fill(2.0*p6.E()/sqrts);
+ _h_4j_costheta3->fill(fabs(cos(p3.theta())));
+ _h_4j_costheta4->fill(fabs(cos(p4.theta())));
+ _h_4j_costheta5->fill(fabs(cos(p5.theta())));
+ _h_4j_costheta6->fill(fabs(cos(p6.theta())));
+ _h_4j_cosomega34->fill(cos(p3.angle(p4)));
+ _h_4j_cosomega35->fill(cos(p3.angle(p5)));
+ _h_4j_cosomega36->fill(cos(p3.angle(p6)));
+ _h_4j_cosomega45->fill(cos(p4.angle(p5)));
+ _h_4j_cosomega46->fill(cos(p4.angle(p6)));
+ _h_4j_cosomega56->fill(cos(p5.angle(p6)));
+ _h_4j_mu34->fill(_safeMass(FourMomentum(p3+p4))/sqrts);
+ _h_4j_mu35->fill(_safeMass(FourMomentum(p3+p5))/sqrts);
+ _h_4j_mu36->fill(_safeMass(FourMomentum(p3+p6))/sqrts);
+ _h_4j_mu45->fill(_safeMass(FourMomentum(p4+p5))/sqrts);
+ _h_4j_mu46->fill(_safeMass(FourMomentum(p4+p6))/sqrts);
+ _h_4j_mu56->fill(_safeMass(FourMomentum(p5+p6))/sqrts);
+ _h_4j_theta_BZ->fill(acos(fabs(costheta_BZ))/degree);
+ _h_4j_costheta_NR->fill(fabs(costheta_NR));
}
double _safeMass(const FourMomentum& p) {
double mass2=p.mass2();
if (mass2>0.0) return sqrt(mass2);
else if (mass2<-1.0e-5) {
MSG_WARNING("m2 = " << mass2 << ". Assuming m2=0.");
return 0.0;
}
else return 0.0;
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_3j_x3;
Histo1DPtr _h_3j_x5;
Histo1DPtr _h_3j_costheta3;
Histo1DPtr _h_3j_psi;
Histo1DPtr _h_3j_mu34;
Histo1DPtr _h_3j_mu35;
Histo1DPtr _h_3j_mu45;
Histo1DPtr _h_4j_x3;
Histo1DPtr _h_4j_x4;
Histo1DPtr _h_4j_x5;
Histo1DPtr _h_4j_x6;
Histo1DPtr _h_4j_costheta3;
Histo1DPtr _h_4j_costheta4;
Histo1DPtr _h_4j_costheta5;
Histo1DPtr _h_4j_costheta6;
Histo1DPtr _h_4j_cosomega34;
Histo1DPtr _h_4j_cosomega35;
Histo1DPtr _h_4j_cosomega36;
Histo1DPtr _h_4j_cosomega45;
Histo1DPtr _h_4j_cosomega46;
Histo1DPtr _h_4j_cosomega56;
Histo1DPtr _h_4j_mu34;
Histo1DPtr _h_4j_mu35;
Histo1DPtr _h_4j_mu36;
Histo1DPtr _h_4j_mu45;
Histo1DPtr _h_4j_mu46;
Histo1DPtr _h_4j_mu56;
Histo1DPtr _h_4j_theta_BZ;
Histo1DPtr _h_4j_costheta_NR;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_1996_S3214044);
}
diff --git a/analyses/pluginD0/D0_2000_I499943.cc b/analyses/pluginD0/D0_2000_I499943.cc
--- a/analyses/pluginD0/D0_2000_I499943.cc
+++ b/analyses/pluginD0/D0_2000_I499943.cc
@@ -1,107 +1,107 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/HeavyHadrons.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
namespace Rivet {
class D0_2000_I499943 : public Analysis {
public:
/// Constructor
D0_2000_I499943()
: Analysis("D0_2000_I499943")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs;
IdentifiedFinalState muons(Cuts::abseta < 0.8 && Cuts::pT > 4.0*GeV);
muons.acceptIdPair(PID::MUON);
declare(muons, "Muons");
FastJets jetproj(fs, FastJets::D0ILCONE, 0.7);
jetproj.useInvisibles();
declare(jetproj, "Jets");
// Book histograms
book(_h_pt_leading_mu ,1, 1, 1);
book(_h_dphi_mumu ,3, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const Jets& jets = apply<FastJets>(event, "Jets").jetsByPt(12*GeV);
if (jets.size() < 2) vetoEvent;
const Particles& muons = apply<IdentifiedFinalState>(event, "Muons").particlesByPt();
if (muons.size() < 2) vetoEvent;
// Muon selection: require the muons to be *close* to jets, not the usual overlap vetoing!
Particles cand_mu;
foreach (const Particle& mu, muons) {
// Ignore muons in "bad" region 80 < phi < 110 degrees
/// @todo Is this really not corrected for?!
if (inRange(mu.phi(), 1.4, 1.92)) continue;
// A muon is a good candidate if within R = 0.8 of a jet
foreach (const Jet& jet, jets) {
if (deltaR(mu, jet) < 0.8) {
cand_mu.push_back(mu);
break;
}
}
}
// Must find at least two jet-matched muons in the event
if (cand_mu.size() < 2) vetoEvent;
/// @todo Is this cut needed? Does space angle mean dR or 3D opening angle in lab frame?
// Remove muon pairs closer than 165 deg in space angle (cosmic veto)
// double dR_mumu = deltaR(cand_mu[0].momentum(), cand_mu[1].momentum());
// if (dR_mumu < 165*degree) vetoEvent;
// Selecting muon pairs with 6 < mass < 35 GeV (we use the two with highest pT)
double m_mumu = (cand_mu[0].momentum() + cand_mu[1].momentum()).mass();
if (!inRange(m_mumu, 6*GeV, 35*GeV)) vetoEvent;
// Get phi angle between muons in degrees
double dphi_mumu = deltaPhi(cand_mu[0], cand_mu[1]) * 180/M_PI;
// Fill histos
- _h_pt_leading_mu->fill(cand_mu[0].pt()/GeV, 1.0);
- _h_dphi_mumu->fill(dphi_mumu, 1.0);
+ _h_pt_leading_mu->fill(cand_mu[0].pt()/GeV);
+ _h_dphi_mumu->fill(dphi_mumu);
}
// Normalise histograms to cross-section
void finalize() {
scale(_h_pt_leading_mu, crossSection()/sumOfWeights()/nanobarn);
scale(_h_dphi_mumu, crossSection()/sumOfWeights()/nanobarn);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_pt_leading_mu, _h_dphi_mumu;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2000_I499943);
}
diff --git a/analyses/pluginD0/D0_2000_I503361.cc b/analyses/pluginD0/D0_2000_I503361.cc
--- a/analyses/pluginD0/D0_2000_I503361.cc
+++ b/analyses/pluginD0/D0_2000_I503361.cc
@@ -1,81 +1,79 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief D0 Run I Z \f$ p_\perp \f$ in Drell-Yan events
/// @author Simone Amoroso
class D0_2000_I503361 : public Analysis {
public:
/// Constructor
D0_2000_I503361()
: Analysis("D0_2000_I503361")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
/// Initialise and register projections here
ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON, 75*GeV, 105*GeV, 0.0*GeV, ZFinder::NOCLUSTER);
declare(zfinder, "ZFinder");
book(_hist_zpt ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
/// @todo Do the event by event analysis here
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size() != 1) {
MSG_DEBUG("Num e+ e- pairs found = " << zfinder.bosons().size());
vetoEvent;
}
const FourMomentum& pZ = zfinder.bosons()[0].momentum();
if (pZ.mass2() < 0) {
MSG_DEBUG("Negative Z mass**2 = " << pZ.mass2()/GeV2 << "!");
vetoEvent;
}
MSG_DEBUG("Dilepton mass = " << pZ.mass()/GeV << " GeV");
- _hist_zpt->fill(pZ.pT(), weight);
+ _hist_zpt->fill(pZ.pT());
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_hist_zpt, crossSection()/picobarn/sumOfWeights());
}
//@}
private:
// Data members like post-cuts event weight counters go here
/// @name Histograms
//@{
Histo1DPtr _hist_zpt;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2000_I503361);
}
diff --git a/analyses/pluginD0/D0_2000_S4480767.cc b/analyses/pluginD0/D0_2000_S4480767.cc
--- a/analyses/pluginD0/D0_2000_S4480767.cc
+++ b/analyses/pluginD0/D0_2000_S4480767.cc
@@ -1,66 +1,64 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/WFinder.hh"
namespace Rivet {
class D0_2000_S4480767 : public Analysis {
public:
/// Constructor
D0_2000_S4480767()
: Analysis("D0_2000_S4480767")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FinalState fs;
WFinder wf(fs, Cuts::abseta < 5, PID::ELECTRON, 0.0*GeV, 200.0*GeV, 0.0*GeV, 0.2);
declare(wf, "WFinder");
book(_h_W_pT ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
const WFinder& wf = apply<WFinder>(event, "WFinder");
if (wf.bosons().size() == 0) vetoEvent;
- _h_W_pT->fill(wf.bosons()[0].pT()/GeV, weight);
+ _h_W_pT->fill(wf.bosons()[0].pT()/GeV);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_W_pT, crossSection()/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_W_pT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2000_S4480767);
}
diff --git a/analyses/pluginD0/D0_2001_S4674421.cc b/analyses/pluginD0/D0_2001_S4674421.cc
--- a/analyses/pluginD0/D0_2001_S4674421.cc
+++ b/analyses/pluginD0/D0_2001_S4674421.cc
@@ -1,191 +1,189 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
namespace Rivet {
/// @brief D0 Run I differential W/Z boson cross-section analysis
/// @author Lars Sonnenschein
/// @author Andy Buckley
class D0_2001_S4674421 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor.
D0_2001_S4674421()
: Analysis("D0_2001_S4674421")
{ }
/// @name Analysis methods
//@{
void init() {
// Final state projection
FinalState fs(-5.0, 5.0); // corrected for detector acceptance
declare(fs, "FS");
// Z -> e- e+
LeadingParticlesFinalState eeFS(FinalState(-5.0, 5.0, 0.)); //20.);
eeFS.addParticleIdPair(PID::ELECTRON);
declare(eeFS, "eeFS");
// W- -> e- nu_e~
LeadingParticlesFinalState enuFS(FinalState(-5.0, 5.0, 0.)); //25.);
enuFS.addParticleId(PID::ELECTRON).addParticleId(PID::NU_EBAR);
declare(enuFS, "enuFS");
// W+ -> e+ nu_e
LeadingParticlesFinalState enubFS(FinalState(-5.0, 5.0, 0.)); //25.);
enubFS.addParticleId(PID::POSITRON).addParticleId(PID::NU_E);
declare(enubFS, "enubFS");
// Remove neutrinos for isolation of final state particles
VetoedFinalState vfs(fs);
vfs.vetoNeutrinos();
declare(vfs, "VFS");
// Counters
- _eventsFilledW = 0.0;
- _eventsFilledZ = 0.0;
+ book(_eventsFilledW,"eventsFilledW");
+ book(_eventsFilledZ,"eventsFilledZ");
// Histograms
book(_h_dsigdpt_w ,1, 1, 1);
book(_h_dsigdpt_z ,1, 1, 2);
book(_h_dsigdpt_scaled_z, 2, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
const LeadingParticlesFinalState& eeFS = apply<LeadingParticlesFinalState>(event, "eeFS");
// Z boson analysis
if (eeFS.particles().size() >= 2) {
// If there is a Z candidate:
// Fill Z pT distributions
double deltaM2=1e30,mass2(0.);
double pT=-1.;
const Particles& Zdaughters = eeFS.particles();
for (size_t ix = 0; ix < Zdaughters.size(); ++ix) {
for (size_t iy = ix+1; iy < Zdaughters.size(); ++iy) {
if (Zdaughters[ix].pid()!=-Zdaughters[iy].pid()) continue;
const FourMomentum pmom = Zdaughters[ix].momentum() + Zdaughters[iy].momentum();
double mz2 = pmom.mass2();
double dm2 = fabs(mz2 - sqr(91.118*GeV));
if (dm2 < deltaM2) {
pT = pmom.pT();
deltaM2 = dm2;
mass2 = mz2;
}
}
}
if (pT > 0. && mass2 > 0. && inRange(sqrt(mass2)/GeV, 75.0, 105.0)) {
- _eventsFilledZ += weight;
+ _eventsFilledZ->fill();
MSG_DEBUG("Z pmom.pT() = " << pT/GeV << " GeV");
- _h_dsigdpt_z->fill(pT/GeV, weight);
+ _h_dsigdpt_z->fill(pT/GeV);
// return if found a Z
return;
}
}
// There is no Z -> ee candidate... so this might be a W event
const LeadingParticlesFinalState& enuFS = apply<LeadingParticlesFinalState>(event, "enuFS");
const LeadingParticlesFinalState& enubFS = apply<LeadingParticlesFinalState>(event, "enubFS");
double deltaM2=1e30;
double pT=-1.;
for (size_t iw = 0; iw < 2; ++iw) {
Particles Wdaughters;
Wdaughters = (iw == 0) ? enuFS.particles() : enubFS.particles();
for (size_t ix = 0; ix < Wdaughters.size(); ++ix) {
for (size_t iy = ix+1; iy < Wdaughters.size(); ++iy) {
if (Wdaughters[ix].pid() == Wdaughters[iy].pid()) continue;
const FourMomentum pmom = Wdaughters[0].momentum() + Wdaughters[1].momentum();
double dm2 = abs(pmom.mass2() - sqr(80.4*GeV));
if (dm2 < deltaM2) {
pT = pmom.pT();
deltaM2 = dm2;
}
}
}
}
if (pT > 0.) {
- _eventsFilledW += weight;
- _h_dsigdpt_w->fill(pT/GeV, weight);
+ _eventsFilledW->fill();
+ _h_dsigdpt_w->fill(pT/GeV);
}
}
void finalize() {
// Get cross-section per event (i.e. per unit weight) from generator
const double xSecPerEvent = crossSectionPerEvent()/picobarn;
// Correct W pT distribution to W cross-section
- const double xSecW = xSecPerEvent * _eventsFilledW;
+ const double xSecW = xSecPerEvent * double(_eventsFilledW);
// Correct Z pT distribution to Z cross-section
- const double xSecZ = xSecPerEvent * _eventsFilledZ;
+ const double xSecZ = xSecPerEvent * double(_eventsFilledZ);
// Get W and Z pT integrals
const double wpt_integral = _h_dsigdpt_w->integral();
const double zpt_integral = _h_dsigdpt_z->integral();
// Divide and scale ratio histos
if (xSecW == 0 || wpt_integral == 0 || xSecZ == 0 || zpt_integral == 0) {
MSG_WARNING("Not filling ratio plot because input histos are empty");
} else {
// Scale factor converts event counts to cross-sections, and inverts the
// branching ratios since only one decay channel has been analysed for each boson.
// Oh, and we put MW/MZ in, like they do in the paper.
const double MW_MZ = 0.8820; // Ratio M_W/M_Z
const double BRZEE_BRWENU = 0.033632 / 0.1073; // Ratio of branching fractions
const double scalefactor = (xSecW / wpt_integral) / (xSecZ / zpt_integral) * MW_MZ * BRZEE_BRWENU;
for (size_t ibin = 0; ibin < _h_dsigdpt_w->numBins(); ibin++) {
const double xval = _h_dsigdpt_w->bin(ibin).xMid();
const double xerr = _h_dsigdpt_w->bin(ibin).xWidth() / 2.;
double yval(0), yerr(0);
if (_h_dsigdpt_w->bin(ibin).sumW() != 0 && _h_dsigdpt_z->bin(ibin).sumW() != 0) {
yval = scalefactor * _h_dsigdpt_w->bin(ibin).sumW() / _h_dsigdpt_z->bin(ibin).sumW();
yerr = yval * sqrt( sqr(_h_dsigdpt_w->bin(ibin).relErr()) + sqr(_h_dsigdpt_z->bin(ibin).relErr()) );
}
_h_dsigdpt_scaled_z->addPoint(xval, yval, xerr, yerr);
}
}
// Normalize non-ratio histos
normalize(_h_dsigdpt_w, xSecW);
normalize(_h_dsigdpt_z, xSecZ);
}
//@}
private:
/// @name Event counters for cross section normalizations
//@{
- double _eventsFilledW;
- double _eventsFilledZ;
+ CounterPtr _eventsFilledW;
+ CounterPtr _eventsFilledZ;
//@}
//@{
/// Histograms
Histo1DPtr _h_dsigdpt_w;
Histo1DPtr _h_dsigdpt_z;
Scatter2DPtr _h_dsigdpt_scaled_z;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2001_S4674421);
}
diff --git a/analyses/pluginD0/D0_2004_S5992206.cc b/analyses/pluginD0/D0_2004_S5992206.cc
--- a/analyses/pluginD0/D0_2004_S5992206.cc
+++ b/analyses/pluginD0/D0_2004_S5992206.cc
@@ -1,138 +1,137 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/VisibleFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
namespace Rivet {
/** @brief D0 Run II angular correlations in di-jet events
* @author Lars Sonnenschein
*
* Measurement of angular correlations in di-jet events.
*
* @par Run conditions
*
* @arg \f$ \sqrt{s} = \f$ 1960 GeV
* @arg Run with generic QCD events.
* @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the histograms:
* @arg \f$ p_\perp^\text{min} = \f$ 50, 75, 100, 150 GeV for the four pT ranges respectively
*
*/
class D0_2004_S5992206 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor.
D0_2004_S5992206()
: Analysis("D0_2004_S5992206")
{ }
//@}
/// @name Analysis methods
//@{
void init() {
// Final state for jets, mET etc.
const FinalState fs(-3.0, 3.0);
declare(fs, "FS");
// Veto neutrinos, and muons with pT above 1.0 GeV
VetoedFinalState vfs(fs);
vfs.vetoNeutrinos();
vfs.addVetoPairDetail(PID::MUON, 1.0*GeV, MAXDOUBLE);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::D0ILCONE, 0.7), "Jets");
declare(MissingMomentum(vfs), "CalMET");
// Book histograms
book(_histJetAzimuth_pTmax75_100 ,1, 2, 1);
book(_histJetAzimuth_pTmax100_130 ,2, 2, 1);
book(_histJetAzimuth_pTmax130_180 ,3, 2, 1);
book(_histJetAzimuth_pTmax180_ ,4, 2, 1);
}
/// Do the analysis
void analyze(const Event& event) {
// Analyse and print some info
const JetAlg& jetpro = apply<JetAlg>(event, "Jets");
MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size());
const Jets jets = jetpro.jetsByPt(40.0*GeV);
if (jets.size() >= 2) {
MSG_DEBUG("Jet multiplicity after pT > 40 GeV cut = " << jets.size());
} else {
vetoEvent;
}
const double rap1 = jets[0].rapidity();
const double rap2 = jets[1].rapidity();
if (fabs(rap1) > 0.5 || fabs(rap2) > 0.5) {
vetoEvent;
}
MSG_DEBUG("Jet eta and pT requirements fulfilled");
const double pT1 = jets[0].pT();
const MissingMomentum& caloMissEt = apply<MissingMomentum>(event, "CalMET");
MSG_DEBUG("Missing vector Et = " << caloMissEt.vectorEt()/GeV << " GeV");
if (caloMissEt.vectorEt().mod() > 0.7*pT1) {
MSG_DEBUG("Vetoing event with too much missing ET: "
<< caloMissEt.vectorEt()/GeV << " GeV > "
<< 0.7*pT1/GeV << " GeV");
vetoEvent;
}
if (pT1/GeV >= 75.0) {
- const double weight = 1.0;
const double dphi = deltaPhi(jets[0].phi(), jets[1].phi());
if (inRange(pT1/GeV, 75.0, 100.0)) {
- _histJetAzimuth_pTmax75_100->fill(dphi, weight);
+ _histJetAzimuth_pTmax75_100->fill(dphi);
} else if (inRange(pT1/GeV, 100.0, 130.0)) {
- _histJetAzimuth_pTmax100_130->fill(dphi, weight);
+ _histJetAzimuth_pTmax100_130->fill(dphi);
} else if (inRange(pT1/GeV, 130.0, 180.0)) {
- _histJetAzimuth_pTmax130_180->fill(dphi, weight);
+ _histJetAzimuth_pTmax130_180->fill(dphi);
} else if (pT1/GeV > 180.0) {
- _histJetAzimuth_pTmax180_->fill(dphi, weight);
+ _histJetAzimuth_pTmax180_->fill(dphi);
}
}
}
// Finalize
void finalize() {
// Normalize histograms to unit area
normalize(_histJetAzimuth_pTmax75_100);
normalize(_histJetAzimuth_pTmax100_130);
normalize(_histJetAzimuth_pTmax130_180);
normalize(_histJetAzimuth_pTmax180_);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _histJetAzimuth_pTmax75_100;
Histo1DPtr _histJetAzimuth_pTmax100_130;
Histo1DPtr _histJetAzimuth_pTmax130_180;
Histo1DPtr _histJetAzimuth_pTmax180_;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2004_S5992206);
}
diff --git a/analyses/pluginD0/D0_2006_S6438750.cc b/analyses/pluginD0/D0_2006_S6438750.cc
--- a/analyses/pluginD0/D0_2006_S6438750.cc
+++ b/analyses/pluginD0/D0_2006_S6438750.cc
@@ -1,102 +1,101 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
namespace Rivet {
/// @brief D0 inclusive isolated photon cross-section vs. \f$ p_\perp(gamma) \f$.
/// @author Andy Buckley
/// @author Gavin Hesketh
class D0_2006_S6438750 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Default constructor.
D0_2006_S6438750()
: Analysis("D0_2006_S6438750")
{ }
//@}
/// @name Analysis methods
//@{
void init() {
// General FS for photon isolation
FinalState fs;
declare(fs, "AllFS");
// Get leading photon
LeadingParticlesFinalState photonfs(FinalState(-0.9, 0.9, 23.0*GeV));
photonfs.addParticleId(PID::PHOTON);
declare(photonfs, "LeadingPhoton");
// Book histograms
book(_h_pTgamma ,1, 1, 1);
}
/// Do the analysis
void analyze(const Event& event) {
// Get the photon
const FinalState& photonfs = apply<FinalState>(event, "LeadingPhoton");
if (photonfs.particles().size() != 1) {
vetoEvent;
}
const FourMomentum photon = photonfs.particles().front().momentum();
// Isolate photon by ensuring that a 0.4 cone around it contains less than 10% of the photon's energy
double E_P = photon.E();
double eta_P = photon.eta();
double phi_P = photon.phi();
double econe = 0.0;
foreach (const Particle& p, apply<FinalState>(event, "AllFS").particles()) {
if (deltaR(eta_P, phi_P,
p.eta(), p.phi()) < 0.4) {
econe += p.E();
if (econe/E_P > 1.1) {
vetoEvent;
}
}
}
// Fill histo
- const double weight = 1.0;
- _h_pTgamma->fill(photon.pT(), weight);
+ _h_pTgamma->fill(photon.pT());
}
// Finalize
void finalize() {
const double lumi_gen = sumOfWeights()/crossSection();
// Divide by effective lumi, plus rapidity bin width of 1.8
scale(_h_pTgamma, 1/lumi_gen * 1/1.8);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_pTgamma;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2006_S6438750);
}
diff --git a/analyses/pluginD0/D0_2007_S7075677.cc b/analyses/pluginD0/D0_2007_S7075677.cc
--- a/analyses/pluginD0/D0_2007_S7075677.cc
+++ b/analyses/pluginD0/D0_2007_S7075677.cc
@@ -1,74 +1,72 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief Measurement of D0 Run II Z \f$ p_\perp \f$ diff cross-section shape
/// @author Andy Buckley
/// @author Gavin Hesketh
/// @author Frank Siegert
class D0_2007_S7075677 : public Analysis {
public:
/// Default constructor.
D0_2007_S7075677()
: Analysis("D0_2007_S7075677")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
ZFinder zfinder(FinalState(), Cuts::open(), PID::ELECTRON,
71*GeV, 111*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder, "ZFinder");
book(_h_yZ ,1, 1, 1);
}
/// Do the analysis
void analyze(const Event & e) {
- const double weight = 1.0;
-
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size() == 1) {
const Particles& el(zfinder.constituents());
if (el[0].pT() > 25*GeV || el[1].pT() > 25*GeV) {
- _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity()), weight);
+ _h_yZ->fill(fabs(zfinder.bosons()[0].rapidity()));
}
} else {
MSG_DEBUG("No unique lepton pair found.");
}
}
// Finalize
void finalize() {
// Data seems to have been normalized for the avg of the two sides
// (+ve & -ve rapidity) rather than the sum, hence the 0.5:
normalize(_h_yZ, 0.5);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_yZ;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2007_S7075677);
}
diff --git a/analyses/pluginD0/D0_2008_S6879055.cc b/analyses/pluginD0/D0_2008_S6879055.cc
--- a/analyses/pluginD0/D0_2008_S6879055.cc
+++ b/analyses/pluginD0/D0_2008_S6879055.cc
@@ -1,126 +1,122 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 measurement of the ratio \f$ \sigma(Z/\gamma^* + n \text{ jets})/\sigma(Z/\gamma^*) \f$
class D0_2008_S6879055 : public Analysis {
public:
/// Default constructor.
D0_2008_S6879055()
: Analysis("D0_2008_S6879055")
{ }
// DEFAULT_RIVET_ANA_CONSTRUCTOR(D0_2008_S6879055);
/// @name Analysis methods
//@{
// Book histograms
void init() {
FinalState fs;
ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON,
40*GeV, 200*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_crossSectionRatio ,1, 1, 1);
book(_pTjet1 ,2, 1, 1);
book(_pTjet2 ,3, 1, 1);
book(_pTjet3 ,4, 1, 1);
}
/// Do the analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
-
-
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size()!=1) {
vetoEvent;
}
FourMomentum e0 = zfinder.constituents()[0].momentum();
FourMomentum e1 = zfinder.constituents()[1].momentum();
const double e0eta = e0.eta();
const double e0phi = e0.phi();
const double e1eta = e1.eta();
const double e1phi = e1.phi();
vector<FourMomentum> finaljet_list;
foreach (const Jet& j, apply<JetAlg>(event, "ConeFinder").jetsByPt(20*GeV)) {
const double jeta = j.eta();
const double jphi = j.phi();
if (fabs(jeta) < 2.5) {
if (deltaR(e0eta, e0phi, jeta, jphi) > 0.4 &&
deltaR(e1eta, e1phi, jeta, jphi) > 0.4) {
finaljet_list.push_back(j.momentum());
}
}
}
// For normalisation of crossSection data (includes events with no jets passing cuts)
- _crossSectionRatio->fill(0, weight);
+ _crossSectionRatio->fill(0);
// Fill jet pT and multiplicities
if (finaljet_list.size() >= 1) {
- _crossSectionRatio->fill(1, weight);
- _pTjet1->fill(finaljet_list[0].pT(), weight);
+ _crossSectionRatio->fill(1);
+ _pTjet1->fill(finaljet_list[0].pT());
}
if (finaljet_list.size() >= 2) {
- _crossSectionRatio->fill(2, weight);
- _pTjet2->fill(finaljet_list[1].pT(), weight);
+ _crossSectionRatio->fill(2);
+ _pTjet2->fill(finaljet_list[1].pT());
}
if (finaljet_list.size() >= 3) {
- _crossSectionRatio->fill(3, weight);
- _pTjet3->fill(finaljet_list[2].pT(), weight);
+ _crossSectionRatio->fill(3);
+ _pTjet3->fill(finaljet_list[2].pT());
}
if (finaljet_list.size() >= 4) {
- _crossSectionRatio->fill(4, weight);
+ _crossSectionRatio->fill(4);
}
}
/// Finalize
void finalize() {
// Now divide by the inclusive result
scale(_crossSectionRatio,1/_crossSectionRatio->bin(0).area());
// Normalise jet pTs to integrals of data
// @note There is no other way to do this, because these quantities are not detector-corrected
/// @todo Use integrals of refData()?
normalize(_pTjet1, 10439); // fixed norm OK
normalize(_pTjet2, 1461.5); // fixed norm OK
normalize(_pTjet3, 217); // fixed norm OK
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _crossSectionRatio;
Histo1DPtr _pTjet1;
Histo1DPtr _pTjet2;
Histo1DPtr _pTjet3;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S6879055);
}
diff --git a/analyses/pluginD0/D0_2008_S7662670.cc b/analyses/pluginD0/D0_2008_S7662670.cc
--- a/analyses/pluginD0/D0_2008_S7662670.cc
+++ b/analyses/pluginD0/D0_2008_S7662670.cc
@@ -1,124 +1,122 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 differential jet cross sections
/// @author Andy Buckley
/// @author Gavin Hesketh
class D0_2008_S7662670 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
D0_2008_S7662670()
: Analysis("D0_2008_S7662670")
{ }
//@}
/// @name Analysis methods
//@{
void init()
{
// Full final state
FinalState fs;
declare(fs, "FS");
// Jets
FastJets jetpro(fs, FastJets::D0ILCONE, 0.7);
declare(jetpro, "Jets");
// Book histograms
book(_h_dsigdptdy_y00_04 ,1, 1, 1);
book(_h_dsigdptdy_y04_08 ,2, 1, 1);
book(_h_dsigdptdy_y08_12 ,3, 1, 1);
book(_h_dsigdptdy_y12_16 ,4, 1, 1);
book(_h_dsigdptdy_y16_20 ,5, 1, 1);
book(_h_dsigdptdy_y20_24 ,6, 1, 1);
}
/// Do the analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
// Skip if the event is empty
const FinalState& fs = apply<FinalState>(event, "FS");
if (fs.empty()) {
MSG_DEBUG("Empty event!");
vetoEvent;
}
// Find the jets
const JetAlg& jetpro = apply<JetAlg>(event, "Jets");
// Fill histo for each jet
foreach (const Jet& j, jetpro.jets(Cuts::pT > 50*GeV)) {
const double pt = j.pT();
const double y = j.absrap();
MSG_TRACE("Filling histos: pT = " << pt/GeV << ", |y| = " << y);
if (y < 0.4) {
- _h_dsigdptdy_y00_04->fill(pt/GeV, weight);
+ _h_dsigdptdy_y00_04->fill(pt/GeV);
} else if (y < 0.8) {
- _h_dsigdptdy_y04_08->fill(pt/GeV, weight);
+ _h_dsigdptdy_y04_08->fill(pt/GeV);
} else if (y < 1.2) {
- _h_dsigdptdy_y08_12->fill(pt/GeV, weight);
+ _h_dsigdptdy_y08_12->fill(pt/GeV);
} else if (y < 1.6) {
- _h_dsigdptdy_y12_16->fill(pt/GeV, weight);
+ _h_dsigdptdy_y12_16->fill(pt/GeV);
} else if (y < 2.0) {
- _h_dsigdptdy_y16_20->fill(pt/GeV, weight);
+ _h_dsigdptdy_y16_20->fill(pt/GeV);
} else if (y < 2.4) {
- _h_dsigdptdy_y20_24->fill(pt/GeV, weight);
+ _h_dsigdptdy_y20_24->fill(pt/GeV);
}
}
}
/// Finalize
void finalize() {
/// Scale by L_eff = sig_MC * L_exp / num_MC
const double lumi_mc = sumOfWeights() / crossSection();
const double scalefactor = 1 / lumi_mc;
scale(_h_dsigdptdy_y00_04, scalefactor);
scale(_h_dsigdptdy_y04_08, scalefactor);
scale(_h_dsigdptdy_y08_12, scalefactor);
scale(_h_dsigdptdy_y12_16, scalefactor);
scale(_h_dsigdptdy_y16_20, scalefactor);
scale(_h_dsigdptdy_y20_24, scalefactor);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_dsigdptdy_y00_04;
Histo1DPtr _h_dsigdptdy_y04_08;
Histo1DPtr _h_dsigdptdy_y08_12;
Histo1DPtr _h_dsigdptdy_y12_16;
Histo1DPtr _h_dsigdptdy_y16_20;
Histo1DPtr _h_dsigdptdy_y20_24;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S7662670);
}
diff --git a/analyses/pluginD0/D0_2008_S7719523.cc b/analyses/pluginD0/D0_2008_S7719523.cc
--- a/analyses/pluginD0/D0_2008_S7719523.cc
+++ b/analyses/pluginD0/D0_2008_S7719523.cc
@@ -1,202 +1,200 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
// A local scope function for division, handling the div-by-zero case
/// @todo Why isn't the math divide() function being found?
namespace {
inline double _safediv(double a, double b, double result_if_err) {
return (b != 0) ? a/b : result_if_err;
}
}
/// @brief Measurement of isolated gamma + jet + X differential cross-sections
///
/// Inclusive isolated gamma + jet cross-sections, differential in pT(gamma), for
/// various photon and jet rapidity bins.
///
/// @author Andy Buckley
/// @author Gavin Hesketh
class D0_2008_S7719523 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
D0_2008_S7719523()
: Analysis("D0_2008_S7719523")
{ }
//@}
/// @name Analysis methods
//@{
/// Set up projections and book histograms
void init() {
// General FS
FinalState fs;
declare(fs, "FS");
// Get leading photon
LeadingParticlesFinalState photonfs(FinalState(-1.0, 1.0, 30.0*GeV));
photonfs.addParticleId(PID::PHOTON);
declare(photonfs, "LeadingPhoton");
// FS excluding the leading photon
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(photonfs);
declare(vfs, "JetFS");
// Jets
FastJets jetpro(vfs, FastJets::D0ILCONE, 0.7);
declare(jetpro, "Jets");
// Histograms
book(_h_central_same_cross_section ,1, 1, 1);
book(_h_central_opp_cross_section ,2, 1, 1);
book(_h_forward_same_cross_section ,3, 1, 1);
book(_h_forward_opp_cross_section ,4, 1, 1);
// Ratio histos to be filled by divide()
book(_h_cen_opp_same, 5, 1, 1);
book(_h_fwd_opp_same, 8, 1, 1);
// Ratio histos to be filled manually, since the num/denom inputs don't match
book(_h_cen_same_fwd_same, 6, 1, 1, true);
book(_h_cen_opp_fwd_same, 7, 1, 1, true);
book(_h_cen_same_fwd_opp, 9, 1, 1, true);
book(_h_cen_opp_fwd_opp, 10, 1, 1, true);
}
/// Do the analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
// Get the photon
const FinalState& photonfs = apply<FinalState>(event, "LeadingPhoton");
if (photonfs.particles().size() != 1) {
vetoEvent;
}
const FourMomentum photon = photonfs.particles().front().momentum();
// Isolate photon by ensuring that a 0.4 cone around it contains less than 7% of the photon's energy
double egamma = photon.E();
double eta_P = photon.eta();
double phi_P = photon.phi();
double econe = 0.0;
foreach (const Particle& p, apply<FinalState>(event, "JetFS").particles()) {
if (deltaR(eta_P, phi_P, p.eta(), p.phi()) < 0.4) {
econe += p.E();
// Veto as soon as E_cone gets larger
if (econe/egamma > 0.07) {
MSG_DEBUG("Vetoing event because photon is insufficiently isolated");
vetoEvent;
}
}
}
Jets jets = apply<FastJets>(event, "Jets").jetsByPt(15.0*GeV);
if (jets.empty()) vetoEvent;
FourMomentum leadingJet = jets[0].momentum();
if (deltaR(eta_P, phi_P, leadingJet.eta(), leadingJet.phi()) < 0.7) {
vetoEvent;
}
int photon_jet_sign = sign( leadingJet.rapidity() * photon.rapidity() );
// Veto if leading jet is outside plotted rapidity regions
const double abs_y1 = fabs(leadingJet.rapidity());
if (inRange(abs_y1, 0.8, 1.5) || abs_y1 > 2.5) {
MSG_DEBUG("Leading jet falls outside acceptance range; |y1| = " << abs_y1);
vetoEvent;
}
// Fill histos
if (fabs(leadingJet.rapidity()) < 0.8) {
Histo1DPtr h = (photon_jet_sign >= 1) ? _h_central_same_cross_section : _h_central_opp_cross_section;
- h->fill(photon.pT(), weight);
+ h->fill(photon.pT());
} else if (inRange( fabs(leadingJet.rapidity()), 1.5, 2.5)) {
Histo1DPtr h = (photon_jet_sign >= 1) ? _h_forward_same_cross_section : _h_forward_opp_cross_section;
- h->fill(photon.pT(), weight);
+ h->fill(photon.pT());
}
}
/// Finalize
void finalize() {
const double lumi_gen = sumOfWeights()/crossSection();
const double dy_photon = 2.0;
const double dy_jet_central = 1.6;
const double dy_jet_forward = 2.0;
// Cross-section ratios (6 plots)
// Central/central and forward/forward ratios
divide(_h_central_opp_cross_section, _h_central_same_cross_section, _h_cen_opp_same);
divide(_h_forward_opp_cross_section, _h_forward_same_cross_section, _h_fwd_opp_same);
// Central/forward ratio combinations
/// @note The central/forward histo binnings are not the same! Hence the need to do these by hand :-(
for (size_t i = 0; i < _h_cen_same_fwd_same->numPoints(); ++i) {
const YODA::HistoBin1D& cen_same_bini = _h_central_same_cross_section->bin(i);
const YODA::HistoBin1D& cen_opp_bini = _h_central_opp_cross_section->bin(i);
const YODA::HistoBin1D& fwd_same_bini = _h_central_same_cross_section->bin(i);
const YODA::HistoBin1D& fwd_opp_bini = _h_central_opp_cross_section->bin(i);
_h_cen_same_fwd_same->point(i).setY(_safediv(cen_same_bini.sumW(), fwd_same_bini.sumW(), 0),
add_quad(cen_same_bini.relErr(), fwd_same_bini.relErr()));
_h_cen_opp_fwd_same->point(i).setY(_safediv(cen_opp_bini.sumW(), fwd_same_bini.sumW(), 0),
add_quad(cen_opp_bini.relErr(), fwd_same_bini.relErr()));
_h_cen_same_fwd_opp->point(i).setY(_safediv(cen_same_bini.sumW(), fwd_opp_bini.sumW(), 0),
add_quad(cen_same_bini.relErr(), fwd_opp_bini.relErr()));
_h_cen_opp_fwd_opp->point(i).setY(_safediv(cen_opp_bini.sumW(), fwd_opp_bini.sumW(), 0),
add_quad(cen_opp_bini.relErr(), fwd_opp_bini.relErr()));
}
// Use generator cross section for remaining histograms
// Each of these needs the additional factor 2 because the
// y_photon * y_jet requirement reduces the corresponding 2D "bin width"
// by a factor 1/2.
scale(_h_central_same_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_central);
scale(_h_central_opp_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_central);
scale(_h_forward_same_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_forward);
scale(_h_forward_opp_cross_section, 2.0/lumi_gen * 1.0/dy_photon * 1.0/dy_jet_forward);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_central_same_cross_section;
Histo1DPtr _h_central_opp_cross_section;
Histo1DPtr _h_forward_same_cross_section;
Histo1DPtr _h_forward_opp_cross_section;
Scatter2DPtr _h_cen_opp_same;
Scatter2DPtr _h_fwd_opp_same;
Scatter2DPtr _h_cen_same_fwd_same;
Scatter2DPtr _h_cen_opp_fwd_same;
Scatter2DPtr _h_cen_same_fwd_opp;
Scatter2DPtr _h_cen_opp_fwd_opp;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S7719523);
}
diff --git a/analyses/pluginD0/D0_2008_S7837160.cc b/analyses/pluginD0/D0_2008_S7837160.cc
--- a/analyses/pluginD0/D0_2008_S7837160.cc
+++ b/analyses/pluginD0/D0_2008_S7837160.cc
@@ -1,112 +1,111 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/WFinder.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
namespace Rivet {
/// @brief D0 Run II measurement of W charge asymmetry
/// @author Andy Buckley
/// @author Gavin Hesketh
class D0_2008_S7837160 : public Analysis {
public:
/// Default constructor.
D0_2008_S7837160()
: Analysis("D0_2008_S7837160")
{ }
/// @name Analysis methods
//@{
// Book histograms and set up projections
void init() {
// Projections
FinalState fs;
/// @todo Use separate pT and ETmiss cuts in WFinder
const WFinder wfe(fs, Cuts::abseta < 5 && Cuts::pT > 25*GeV, PID::ELECTRON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2);
declare(wfe, "WFe");
// Histograms (temporary +- charge histos and scatters to store the calculated asymmetries)
for (size_t pmindex = 0; pmindex < 2; ++pmindex) {
const string suffix = (pmindex == 0) ? "plus" : "minus";
book(_hs_dsigpm_deta_25_35[pmindex] ,"TMP/dsigpm_deta_25_35_" + suffix, refData(1, 1, 1));
book(_hs_dsigpm_deta_35[pmindex] ,"TMP/dsigpm_deta_35_" + suffix, refData(1, 1, 2));
book(_hs_dsigpm_deta_25[pmindex] ,"TMP/dsigpm_deta_25_" + suffix, refData(1, 1, 3));
}
book(_h_asym1, 1, 1, 1);
book(_h_asym2, 1, 1, 2);
book(_h_asym3, 1, 1, 3);
}
/// Do the analysis
void analyze(const Event & event) {
const WFinder& wf = apply<WFinder>(event, "WFe");
if (wf.bosons().size() == 0) {
MSG_DEBUG("No W candidates found: vetoing");
vetoEvent;
}
// Get the e+- momentum, and an effective charge including the eta sign
/// @todo Is it correct to multiply the eta sign into the charge to "fold" the plot?
const FourMomentum p_e = wf.constituentLeptons()[0].momentum();
const int chg_e = sign(p_e.eta()) * sign(charge(wf.constituentLeptons()[0]));
assert(chg_e == 1 || chg_e == -1);
MSG_TRACE("Charged lepton sign = " << chg_e);
// Fill histos with appropriate +- indexing
- const double weight = 1.0;
const size_t pmindex = (chg_e > 0) ? 0 : 1;
- if (p_e.Et() < 35*GeV) _hs_dsigpm_deta_25_35[pmindex]->fill(fabs(p_e.eta()), weight);
- else _hs_dsigpm_deta_35[pmindex]->fill(fabs(p_e.eta()), weight);
- _hs_dsigpm_deta_25[pmindex]->fill(fabs(p_e.eta()), weight);
+ if (p_e.Et() < 35*GeV) _hs_dsigpm_deta_25_35[pmindex]->fill(fabs(p_e.eta()));
+ else _hs_dsigpm_deta_35[pmindex]->fill(fabs(p_e.eta()));
+ _hs_dsigpm_deta_25[pmindex]->fill(fabs(p_e.eta()));
}
/// @name Helper functions for constructing asymmetry histograms in finalize()
//@{
void calc_asymm(const Histo1DPtr plus, const Histo1DPtr minus, Scatter2DPtr target) {
divide(*plus - *minus, *plus + *minus, target);
}
void calc_asymm(const Histo1DPtr histos[2], Scatter2DPtr target) {
calc_asymm(histos[0], histos[1], target);
}
//@}
/// @brief Finalize
///
/// Construct asymmetry: (dsig+/deta - dsig-/deta) / (dsig+/deta + dsig-/deta) for each ET region
void finalize() {
calc_asymm(_hs_dsigpm_deta_25_35, _h_asym1);
calc_asymm(_hs_dsigpm_deta_35, _h_asym2);
calc_asymm(_hs_dsigpm_deta_25, _h_asym3);
_h_asym1->scale(1.,100.);
_h_asym2->scale(1.,100.);
_h_asym3->scale(1.,100.);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _hs_dsigpm_deta_25_35[2], _hs_dsigpm_deta_35[2], _hs_dsigpm_deta_25[2];
Scatter2DPtr _h_asym1, _h_asym2, _h_asym3;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S7837160);
}
diff --git a/analyses/pluginD0/D0_2008_S7863608.cc b/analyses/pluginD0/D0_2008_S7863608.cc
--- a/analyses/pluginD0/D0_2008_S7863608.cc
+++ b/analyses/pluginD0/D0_2008_S7863608.cc
@@ -1,134 +1,132 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 differential Z/\f$ \gamma^* \f$ + jet + \f$ X \f$ cross sections
/// @author Gavin Hesketh, Andy Buckley, Frank Siegert
class D0_2008_S7863608 : public Analysis {
public:
/// Constructor
D0_2008_S7863608()
: Analysis("D0_2008_S7863608")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
/// @todo These clustering arguments look odd: are they ok?
Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV;
ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
- _sum_of_weights_inclusive = 0;
+ book(_sum_of_weights_inclusive, "sum_of_weights_inclusive");
book(_h_jet_pT_cross_section ,1, 1, 1);
book(_h_jet_pT_normalised ,1, 1, 2);
book(_h_jet_y_cross_section ,2, 1, 1);
book(_h_jet_y_normalised ,2, 1, 2);
book(_h_Z_pT_cross_section ,3, 1, 1);
book(_h_Z_pT_normalised ,3, 1, 2);
book(_h_Z_y_cross_section ,4, 1, 1);
book(_h_Z_y_normalised ,4, 1, 2);
book(_h_total_cross_section ,5, 1, 1);
}
// Do the analysis
void analyze(const Event& e) {
- const double weight = 1.0;
-
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size()==1) {
- _sum_of_weights_inclusive += weight;
+ _sum_of_weights_inclusive->fill();
const JetAlg& jetpro = apply<JetAlg>(e, "ConeFinder");
const Jets& jets = jetpro.jetsByPt(20*GeV);
Jets jets_cut;
foreach (const Jet& j, jets) {
if (j.abseta() < 2.8) {
jets_cut.push_back(j);
}
}
// Return if there are no jets:
if(jets_cut.size()<1) {
MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts ");
vetoEvent;
}
const FourMomentum Zmom = zfinder.bosons()[0].momentum();
// In jet pT
- _h_jet_pT_cross_section->fill( jets_cut[0].pT(), weight);
- _h_jet_pT_normalised->fill( jets_cut[0].pT(), weight);
- _h_jet_y_cross_section->fill( fabs(jets_cut[0].rapidity()), weight);
- _h_jet_y_normalised->fill( fabs(jets_cut[0].rapidity()), weight);
+ _h_jet_pT_cross_section->fill( jets_cut[0].pT());
+ _h_jet_pT_normalised->fill( jets_cut[0].pT());
+ _h_jet_y_cross_section->fill( fabs(jets_cut[0].rapidity()));
+ _h_jet_y_normalised->fill( fabs(jets_cut[0].rapidity()));
// In Z pT
- _h_Z_pT_cross_section->fill(Zmom.pT(), weight);
- _h_Z_pT_normalised->fill(Zmom.pT(), weight);
- _h_Z_y_cross_section->fill(Zmom.absrap(), weight);
- _h_Z_y_normalised->fill(Zmom.absrap(), weight);
+ _h_Z_pT_cross_section->fill(Zmom.pT());
+ _h_Z_pT_normalised->fill(Zmom.pT());
+ _h_Z_y_cross_section->fill(Zmom.absrap());
+ _h_Z_y_normalised->fill(Zmom.absrap());
- _h_total_cross_section->fill(1960, weight);
+ _h_total_cross_section->fill(1960);
}
}
/// Finalize
void finalize() {
const double invlumi = crossSection()/sumOfWeights();
scale(_h_total_cross_section, invlumi);
scale(_h_jet_pT_cross_section, invlumi);
scale(_h_jet_y_cross_section, invlumi);
scale(_h_Z_pT_cross_section, invlumi);
scale(_h_Z_y_cross_section, invlumi);
double factor=1/_sum_of_weights_inclusive;
if (_sum_of_weights_inclusive == 0) factor = 0;
scale(_h_jet_pT_normalised, factor);
scale(_h_jet_y_normalised, factor);
scale(_h_Z_pT_normalised, factor);
scale(_h_Z_y_normalised, factor);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_pT_cross_section;
Histo1DPtr _h_jet_y_cross_section;
Histo1DPtr _h_Z_pT_cross_section;
Histo1DPtr _h_Z_y_cross_section;
Histo1DPtr _h_total_cross_section;
Histo1DPtr _h_jet_pT_normalised;
Histo1DPtr _h_jet_y_normalised;
Histo1DPtr _h_Z_pT_normalised;
Histo1DPtr _h_Z_y_normalised;
//@}
- double _sum_of_weights_inclusive;
+ CounterPtr _sum_of_weights_inclusive;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S7863608);
}
diff --git a/analyses/pluginD0/D0_2009_S8202443.cc b/analyses/pluginD0/D0_2009_S8202443.cc
--- a/analyses/pluginD0/D0_2009_S8202443.cc
+++ b/analyses/pluginD0/D0_2009_S8202443.cc
@@ -1,126 +1,126 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 Z + jet + \f$ X \f$ cross-section / \f$ p_\perp \f$ distributions
class D0_2009_S8202443 : public Analysis {
public:
/// Constructor
D0_2009_S8202443()
- : Analysis("D0_2009_S8202443"),
- _sum_of_weights(0), _sum_of_weights_constrained(0)
+ : Analysis("D0_2009_S8202443")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
FinalState fs;
// Leptons in constrained tracking acceptance
Cut cuts = (Cuts::abseta < 1.1 || Cuts::absetaIn(1.5, 2.5)) && Cuts::pT > 25*GeV;
ZFinder zfinder_constrained(fs, cuts, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder_constrained, "ZFinderConstrained");
FastJets conefinder_constrained(zfinder_constrained.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder_constrained, "ConeFinderConstrained");
// Unconstrained leptons
ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_h_jet1_pT_constrained ,1, 1, 1);
book(_h_jet2_pT_constrained ,3, 1, 1);
book(_h_jet3_pT_constrained ,5, 1, 1);
book(_h_jet1_pT ,2, 1, 1);
book(_h_jet2_pT ,4, 1, 1);
book(_h_jet3_pT ,6, 1, 1);
+
+ book(_sum_of_weights,"sum_of_weights");
+ book(_sum_of_weights_constrained, "sum_of_weights_constrained");
}
// Do the analysis
void analyze(const Event& e) {
- double weight = 1.0;
-
// Unconstrained electrons
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size() == 0) {
MSG_DEBUG("No unique lepton pair found.");
vetoEvent;
}
- _sum_of_weights += weight;
+ _sum_of_weights->fill();
const Jets jets_cut = apply<JetAlg>(e, "ConeFinder").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.5);
if (jets_cut.size() > 0)
- _h_jet1_pT->fill(jets_cut[0].pT()/GeV, weight);
+ _h_jet1_pT->fill(jets_cut[0].pT()/GeV);
if (jets_cut.size() > 1)
- _h_jet2_pT->fill(jets_cut[1].pT()/GeV, weight);
+ _h_jet2_pT->fill(jets_cut[1].pT()/GeV);
if (jets_cut.size() > 2)
- _h_jet3_pT->fill(jets_cut[2].pT()/GeV, weight);
+ _h_jet3_pT->fill(jets_cut[2].pT()/GeV);
// Constrained electrons
const ZFinder& zfinder_constrained = apply<ZFinder>(e, "ZFinderConstrained");
if (zfinder_constrained.bosons().size() == 0) {
MSG_DEBUG("No unique constrained lepton pair found.");
return; // Not really a "veto", since if we got this far there is an unconstrained Z
}
- _sum_of_weights_constrained += weight;
+ _sum_of_weights_constrained->fill();
const Jets& jets_constrained = apply<JetAlg>(e, "ConeFinderConstrained").jetsByPt(20*GeV);
/// @todo Replace this explicit selection with a Cut
Jets jets_cut_constrained;
foreach (const Jet& j, jets_constrained) {
if (j.abseta() < 2.5) jets_cut_constrained.push_back(j);
}
if (jets_cut_constrained.size() > 0)
- _h_jet1_pT_constrained->fill(jets_cut_constrained[0].pT()/GeV, weight);
+ _h_jet1_pT_constrained->fill(jets_cut_constrained[0].pT()/GeV);
if (jets_cut_constrained.size() > 1)
- _h_jet2_pT_constrained->fill(jets_cut_constrained[1].pT()/GeV, weight);
+ _h_jet2_pT_constrained->fill(jets_cut_constrained[1].pT()/GeV);
if (jets_cut_constrained.size() > 2)
- _h_jet3_pT_constrained->fill(jets_cut_constrained[2].pT()/GeV, weight);
+ _h_jet3_pT_constrained->fill(jets_cut_constrained[2].pT()/GeV);
}
// Finalize
void finalize() {
scale(_h_jet1_pT, 1/_sum_of_weights);
scale(_h_jet2_pT, 1/_sum_of_weights);
scale(_h_jet3_pT, 1/_sum_of_weights);
scale(_h_jet1_pT_constrained, 1/_sum_of_weights_constrained);
scale(_h_jet2_pT_constrained, 1/_sum_of_weights_constrained);
scale(_h_jet3_pT_constrained, 1/_sum_of_weights_constrained);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet1_pT;
Histo1DPtr _h_jet2_pT;
Histo1DPtr _h_jet3_pT;
Histo1DPtr _h_jet1_pT_constrained;
Histo1DPtr _h_jet2_pT_constrained;
Histo1DPtr _h_jet3_pT_constrained;
//@}
- double _sum_of_weights, _sum_of_weights_constrained;
+ CounterPtr _sum_of_weights, _sum_of_weights_constrained;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2009_S8202443);
}
diff --git a/analyses/pluginD0/D0_2009_S8349509.cc b/analyses/pluginD0/D0_2009_S8349509.cc
--- a/analyses/pluginD0/D0_2009_S8349509.cc
+++ b/analyses/pluginD0/D0_2009_S8349509.cc
@@ -1,171 +1,168 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 Z+jets angular distributions
class D0_2009_S8349509 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
D0_2009_S8349509()
- : Analysis("D0_2009_S8349509"),
- _inclusive_Z_sumofweights(0)
+ : Analysis("D0_2009_S8349509")
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms
void init() {
Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV;
ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_h_dphi_jet_Z25 ,1, 1, 1);
book(_h_dphi_jet_Z45 ,2, 1, 1);
book(_h_dy_jet_Z25 ,3, 1, 1);
book(_h_dy_jet_Z45 ,4, 1, 1);
book(_h_yboost_jet_Z25 ,5, 1, 1);
book(_h_yboost_jet_Z45 ,6, 1, 1);
book(_h_dphi_jet_Z25_xs ,1, 1, 2);
book(_h_dphi_jet_Z45_xs ,2, 1, 2);
book(_h_dy_jet_Z25_xs ,3, 1, 2);
book(_h_dy_jet_Z45_xs ,4, 1, 2);
book(_h_yboost_jet_Z25_xs ,5, 1, 2);
book(_h_yboost_jet_Z45_xs ,6, 1, 2);
- _inclusive_Z_sumofweights = 0;
+ book(_inclusive_Z_sumofweights, "_inclusive_Z_sumofweights");
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size() == 1) {
// count inclusive sum of weights for histogram normalisation
- _inclusive_Z_sumofweights += weight;
+ _inclusive_Z_sumofweights->fill();
const FourMomentum& zmom = zfinder.bosons()[0].momentum();
if (zmom.pT() < 25*GeV) vetoEvent;
Jets jets;
foreach (const Jet& j, apply<JetAlg>(event, "ConeFinder").jetsByPt(20*GeV)) {
if (j.abseta() < 2.8) {
jets.push_back(j);
break;
}
}
// Return if there are no jets:
if (jets.size() < 1) {
MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts ");
vetoEvent;
}
const FourMomentum& jetmom = jets[0].momentum();
const double yZ = zmom.rapidity();
const double yjet = jetmom.rapidity();
const double dphi = deltaPhi(zmom, jetmom);
const double dy = deltaRap(zmom, jetmom);
const double yboost = fabs(yZ+yjet)/2;
if (zmom.pT() > 25*GeV) {
- _h_dphi_jet_Z25->fill(dphi, weight);
- _h_dy_jet_Z25->fill(dy, weight);
- _h_yboost_jet_Z25->fill(yboost, weight);
- _h_dphi_jet_Z25_xs->fill(dphi, weight);
- _h_dy_jet_Z25_xs->fill(dy, weight);
- _h_yboost_jet_Z25_xs->fill(yboost, weight);
+ _h_dphi_jet_Z25->fill(dphi);
+ _h_dy_jet_Z25->fill(dy);
+ _h_yboost_jet_Z25->fill(yboost);
+ _h_dphi_jet_Z25_xs->fill(dphi);
+ _h_dy_jet_Z25_xs->fill(dy);
+ _h_yboost_jet_Z25_xs->fill(yboost);
}
if (zmom.pT() > 45*GeV) {
- _h_dphi_jet_Z45->fill(dphi, weight);
- _h_dy_jet_Z45->fill(dy, weight);
- _h_yboost_jet_Z45->fill(yboost, weight);
- _h_dphi_jet_Z45_xs->fill(dphi, weight);
- _h_dy_jet_Z45_xs->fill(dy, weight);
- _h_yboost_jet_Z45_xs->fill(yboost, weight);
+ _h_dphi_jet_Z45->fill(dphi);
+ _h_dy_jet_Z45->fill(dy);
+ _h_yboost_jet_Z45->fill(yboost);
+ _h_dphi_jet_Z45_xs->fill(dphi);
+ _h_dy_jet_Z45_xs->fill(dy);
+ _h_yboost_jet_Z45_xs->fill(yboost);
}
}
}
void finalize() {
if (_inclusive_Z_sumofweights == 0) return;
scale(_h_dphi_jet_Z25, 1/_inclusive_Z_sumofweights);
scale(_h_dphi_jet_Z45, 1/_inclusive_Z_sumofweights);
scale(_h_dy_jet_Z25, 1/_inclusive_Z_sumofweights);
scale(_h_dy_jet_Z45, 1/_inclusive_Z_sumofweights);
scale(_h_yboost_jet_Z25, 1/_inclusive_Z_sumofweights);
scale(_h_yboost_jet_Z45, 1/_inclusive_Z_sumofweights);
scale(_h_dphi_jet_Z25_xs, crossSectionPerEvent());
scale(_h_dphi_jet_Z45_xs, crossSectionPerEvent());
scale(_h_dy_jet_Z25_xs, crossSectionPerEvent());
scale(_h_dy_jet_Z45_xs, crossSectionPerEvent());
scale(_h_yboost_jet_Z25_xs, crossSectionPerEvent());
scale(_h_yboost_jet_Z45_xs, crossSectionPerEvent());
}
//@}
private:
// Data members like post-cuts event weight counters go here
private:
/// @name Histograms (normalised)
//@{
Histo1DPtr _h_dphi_jet_Z25;
Histo1DPtr _h_dphi_jet_Z45;
Histo1DPtr _h_dy_jet_Z25;
Histo1DPtr _h_dy_jet_Z45;
Histo1DPtr _h_yboost_jet_Z25;
Histo1DPtr _h_yboost_jet_Z45;
//@}
/// @name Histograms (absolute cross sections)
//@{
Histo1DPtr _h_dphi_jet_Z25_xs;
Histo1DPtr _h_dphi_jet_Z45_xs;
Histo1DPtr _h_dy_jet_Z25_xs;
Histo1DPtr _h_dy_jet_Z45_xs;
Histo1DPtr _h_yboost_jet_Z25_xs;
Histo1DPtr _h_yboost_jet_Z45_xs;
//@}
- double _inclusive_Z_sumofweights;
+ CounterPtr _inclusive_Z_sumofweights;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2009_S8349509);
}
diff --git a/analyses/pluginD0/D0_2010_S8671338.cc b/analyses/pluginD0/D0_2010_S8671338.cc
--- a/analyses/pluginD0/D0_2010_S8671338.cc
+++ b/analyses/pluginD0/D0_2010_S8671338.cc
@@ -1,70 +1,69 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief Measurement of Z(->muon muon) pT differential cross-section
/// @author Flavia Dias
class D0_2010_S8671338 : public Analysis {
public:
/// Constructor
D0_2010_S8671338()
: Analysis("D0_2010_S8671338")
{ }
///@name Analysis methods
//@{
/// Add projections and book histograms
void init() {
Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV;
ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK);
declare(zfinder, "ZFinder");
book(_h_Z_pT_normalised ,1, 1, 1);
book(_h_Z_pT_xs ,2, 1, 1);
}
// Do the analysis
void analyze(const Event& e) {
- const double weight = 1.0;
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size()==1) {
double ZpT = zfinder.bosons()[0].pT()/GeV;
- _h_Z_pT_normalised->fill(ZpT, weight);
- _h_Z_pT_xs->fill(ZpT, weight);
+ _h_Z_pT_normalised->fill(ZpT);
+ _h_Z_pT_xs->fill(ZpT);
}
}
/// Finalize
void finalize() {
normalize(_h_Z_pT_normalised);
scale(_h_Z_pT_xs, crossSection()/sumOfWeights());
}
//@}
private:
/// @name Histogram
Histo1DPtr _h_Z_pT_normalised;
Histo1DPtr _h_Z_pT_xs;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2010_S8671338);
}
diff --git a/analyses/pluginD0/D0_2011_I895662.cc b/analyses/pluginD0/D0_2011_I895662.cc
--- a/analyses/pluginD0/D0_2011_I895662.cc
+++ b/analyses/pluginD0/D0_2011_I895662.cc
@@ -1,95 +1,93 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
class D0_2011_I895662 : public Analysis {
public:
D0_2011_I895662()
: Analysis("D0_2011_I895662")
{ }
public:
void init() {
FastJets jets(FinalState(-3.6, 3.6, 0.*GeV), FastJets::D0ILCONE, 0.7);
jets.useInvisibles();
declare(jets, "Jets");
book(_h_m3j_08_40 ,1, 1, 1);
book(_h_m3j_16_40 ,2, 1, 1);
book(_h_m3j_24_40 ,3, 1, 1);
book(_h_m3j_24_70 ,4, 1, 1);
book(_h_m3j_24_100 ,5, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
Jets jets = apply<FastJets>(event, "Jets").jetsByPt(40.*GeV);
// Need three jets, leading jet above 150 GeV
if (jets.size() < 3 || jets[0].pT() <= 150.*GeV) vetoEvent;
std::vector<FourMomentum> p;
for (size_t i=0; i<3; i++) {
p.push_back(jets[i].momentum());
}
// Jets need to be separated by 2*Rcone
if (deltaR(p[0], p[1], RAPIDITY) < 1.4 ||
deltaR(p[0], p[2], RAPIDITY) < 1.4 ||
deltaR(p[1], p[2], RAPIDITY) < 1.4)
vetoEvent;
// Leading three jets need to be within |y|<2.4
double ymax = fabs(p[0].rapidity());
for (size_t i=1; i<3; i++) {
if (ymax < fabs(p[i].rapidity())) ymax = fabs(p[i].rapidity());
}
if (ymax >= 2.4) vetoEvent;
double m3jet = (p[0]+p[1]+p[2]).mass()/GeV;
- if (ymax < 0.8) _h_m3j_08_40->fill(m3jet, weight);
- if (ymax < 1.6) _h_m3j_16_40->fill(m3jet, weight);
+ if (ymax < 0.8) _h_m3j_08_40->fill(m3jet);
+ if (ymax < 1.6) _h_m3j_16_40->fill(m3jet);
if (ymax < 2.4) {
- _h_m3j_24_40->fill(m3jet, weight);
- if (p[2].pT() > 70.*GeV) _h_m3j_24_70->fill(m3jet, weight);
- if (p[2].pT() > 100.*GeV) _h_m3j_24_100->fill(m3jet, weight);
+ _h_m3j_24_40->fill(m3jet);
+ if (p[2].pT() > 70.*GeV) _h_m3j_24_70->fill(m3jet);
+ if (p[2].pT() > 100.*GeV) _h_m3j_24_100->fill(m3jet);
}
}
void finalize() {
// Factor of 1000 is based on GeV <-> TeV mismatch between paper and Hepdata table
scale(_h_m3j_08_40, 1000*crossSection()/picobarn/sumOfWeights());
scale(_h_m3j_16_40, 1000*crossSection()/picobarn/sumOfWeights());
scale(_h_m3j_24_40, 1000*crossSection()/picobarn/sumOfWeights());
scale(_h_m3j_24_70, 1000*crossSection()/picobarn/sumOfWeights());
scale(_h_m3j_24_100, 1000*crossSection()/picobarn/sumOfWeights());
}
private:
Histo1DPtr _h_m3j_08_40;
Histo1DPtr _h_m3j_16_40;
Histo1DPtr _h_m3j_24_40;
Histo1DPtr _h_m3j_24_70;
Histo1DPtr _h_m3j_24_100;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2011_I895662);
}
diff --git a/analyses/pluginHERA/H1_1994_S2919893.cc b/analyses/pluginHERA/H1_1994_S2919893.cc
--- a/analyses/pluginHERA/H1_1994_S2919893.cc
+++ b/analyses/pluginHERA/H1_1994_S2919893.cc
@@ -1,226 +1,229 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/DISKinematics.hh"
namespace Rivet {
/// @brief H1 energy flow and charged particle spectra
/// @author Peter Richardson
/// Based on the equivalent HZTool analysis
class H1_1994_S2919893 : public Analysis {
public:
/// Constructor
H1_1994_S2919893()
: Analysis("H1_1994_S2919893")
- {
-
- // Initialise member variables
- _w77 = make_pair(0.0, 0.0);
- _w122 = make_pair(0.0, 0.0);
- _w169 = make_pair(0.0, 0.0);
- _w117 = make_pair(0.0, 0.0);
- _wEnergy = make_pair(0.0, 0.0);
- }
+ {}
/// @name Analysis methods
//@{
/// Initialise projections and histograms
void init() {
// Projections
declare(DISLepton(), "Lepton");
declare(DISKinematics(), "Kinematics");
declare(FinalState(), "FS");
// Histos
book(_histEnergyFlowLowX ,1, 1, 1);
book(_histEnergyFlowHighX ,1, 1, 2);
book(_histEECLowX ,2, 1, 1);
book(_histEECHighX ,2, 1, 2);
book(_histSpectraW77 ,3, 1, 1);
book(_histSpectraW122 ,3, 1, 2);
book(_histSpectraW169 ,3, 1, 3);
book(_histSpectraW117 ,3, 1, 4);
book(_histPT2 ,4, 1, 1);
+
+ book(_w77 .first, "TMP/w77_1");
+ book(_w122.first, "TMP/w122_1");
+ book(_w169.first, "TMP/w169_1");
+ book(_w117.first, "TMP/w117_1");
+ book(_wEnergy.first, "TMP/wEnergy_1");
+
+ book(_w77 .second, "TMP/w77_2");
+ book(_w122.second, "TMP/w122_2");
+ book(_w169.second, "TMP/w169_2");
+ book(_w117.second, "TMP/w117_2");
+ book(_wEnergy.second, "TMP/wEnergy_2");
}
/// Analyse each event
void analyze(const Event& event) {
// Get the DIS kinematics
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
const double x = dk.x();
const double w2 = dk.W2();
const double w = sqrt(w2);
// Momentum of the scattered lepton
const DISLepton& dl = apply<DISLepton>(event,"Lepton");
const FourMomentum leptonMom = dl.out();
const double ptel = leptonMom.pT();
const double enel = leptonMom.E();
const double thel = leptonMom.angle(dk.beamHadron().mom())/degree;
// Extract the particles other than the lepton
const FinalState& fs = apply<FinalState>(event, "FS");
Particles particles;
particles.reserve(fs.particles().size());
const GenParticle* dislepGP = dl.out().genParticle();
foreach (const Particle& p, fs.particles()) {
const GenParticle* loopGP = p.genParticle();
if (loopGP == dislepGP) continue;
particles.push_back(p);
}
// Cut on the forward energy
double efwd = 0.0;
foreach (const Particle& p, particles) {
const double th = p.angle(dk.beamHadron())/degree;
if (inRange(th, 4.4, 15)) efwd += p.E();
}
// Apply the cuts
// Lepton energy and angle, w2 and forward energy
MSG_DEBUG("enel/GeV = " << enel/GeV << ", thel = " << thel
<< ", w2 = " << w2 << ", efwd/GeV = " << efwd/GeV);
bool cut = enel/GeV > 14. && thel > 157. && thel < 172.5 && w2 >= 3000. && efwd/GeV > 0.5;
if (!cut) vetoEvent;
// Weight of the event
- const double weight = 1.0;
- (x < 1e-3 ? _wEnergy.first : _wEnergy.second) += weight;
+ (x < 1e-3 ? _wEnergy.first : _wEnergy.second)->fill();
// Boost to hadronic CM
const LorentzTransform hcmboost = dk.boostHCM();
// Loop over the particles
long ncharged(0);
for (size_t ip1 = 0; ip1 < particles.size(); ++ip1) {
const Particle& p = particles[ip1];
const double th = p.angle(dk.beamHadron().momentum()) / degree;
// Boost momentum to lab
const FourMomentum hcmMom = hcmboost.transform(p.momentum());
// Angular cut
if (th <= 4.4) continue;
// Energy flow histogram
const double et = fabs(hcmMom.Et());
const double eta = hcmMom.eta();
- (x < 1e-3 ? _histEnergyFlowLowX : _histEnergyFlowHighX)->fill(eta, et*weight);
+ (x < 1e-3 ? _histEnergyFlowLowX : _histEnergyFlowHighX)->fill(eta, et);
if (PID::threeCharge(p.pid()) != 0) {
/// @todo Use units in w comparisons... what are the units?
if (w > 50. && w <= 200.) {
double xf= 2 * hcmMom.z() / w;
double pt2 = hcmMom.pT2();
if (w > 50. && w <= 100.) {
- _histSpectraW77 ->fill(xf, weight);
+ _histSpectraW77 ->fill(xf);
} else if (w > 100. && w <= 150.) {
- _histSpectraW122->fill(xf, weight);
+ _histSpectraW122->fill(xf);
} else if (w > 150. && w <= 200.) {
- _histSpectraW169->fill(xf, weight);
+ _histSpectraW169->fill(xf);
}
- _histSpectraW117->fill(xf, weight);
+ _histSpectraW117->fill(xf);
/// @todo Is this profile meant to be filled with 2 weight factors?
- _histPT2->fill(xf, pt2*weight/GeV2, weight);
+ _histPT2->fill(xf, pt2/GeV2);
++ncharged;
}
}
// Energy-energy correlation
if (th <= 8.) continue;
double phi1 = p.phi(ZERO_2PI);
double eta1 = p.eta();
double et1 = fabs(p.momentum().Et());
for (size_t ip2 = ip1+1; ip2 < particles.size(); ++ip2) {
const Particle& p2 = particles[ip2];
//double th2 = beamAngle(p2.momentum(), order);
double th2 = p2.angle(dk.beamHadron().momentum()) / degree;
if (th2 <= 8.) continue;
double phi2 = p2.phi(ZERO_2PI);
/// @todo Use angle function
double deltaphi = phi1 - phi2;
if (fabs(deltaphi) > PI) deltaphi = fabs(fabs(deltaphi) - TWOPI);
double eta2 = p2.eta();
double omega = sqrt(sqr(eta1-eta2) + sqr(deltaphi));
double et2 = fabs(p2.momentum().Et());
- double wt = et1*et2 / sqr(ptel) * weight;
+ double wt = et1*et2 / sqr(ptel);
(x < 1e-3 ? _histEECLowX : _histEECHighX)->fill(omega, wt);
}
}
// Factors for normalization
if (w > 50. && w <= 200.) {
if (w <= 100.) {
- _w77.first += ncharged*weight;
- _w77.second += weight;
+ _w77.first ->fill(ncharged);
+ _w77.second->fill();
} else if (w <= 150.) {
- _w122.first += ncharged*weight;
- _w122.second += weight;
+ _w122.first ->fill(ncharged);
+ _w122.second->fill();
} else {
- _w169.first += ncharged*weight;
- _w169.second += weight;
+ _w169.first ->fill(ncharged);
+ _w169.second->fill();
}
- _w117.first += ncharged*weight;
- _w117.second += weight;
+ _w117.first ->fill(ncharged);
+ _w117.second->fill();
}
}
// Normalize inclusive single particle distributions to the average number of charged particles per event.
void finalize() {
normalize(_histSpectraW77, _w77.first/_w77.second);
normalize(_histSpectraW122, _w122.first/_w122.second);
normalize(_histSpectraW169, _w169.first/_w169.second);
normalize(_histSpectraW117, _w117.first/_w117.second);
scale(_histEnergyFlowLowX , 1./_wEnergy.first );
scale(_histEnergyFlowHighX, 1./_wEnergy.second);
scale(_histEECLowX , 1./_wEnergy.first );
scale(_histEECHighX, 1./_wEnergy.second);
}
//@}
private:
/// Polar angle with right direction of the beam
inline double beamAngle(const FourVector& v, bool order) {
double thel = v.polarAngle()/degree;
if (thel < 0) thel += 180.;
if (!order) thel = 180 - thel;
return thel;
}
/// @name Histograms
//@{
Histo1DPtr _histEnergyFlowLowX, _histEnergyFlowHighX;
Histo1DPtr _histEECLowX, _histEECHighX;
Histo1DPtr _histSpectraW77, _histSpectraW122, _histSpectraW169, _histSpectraW117;
Profile1DPtr _histPT2;
//@}
/// @name Storage of weights to calculate averages for normalisation
//@{
- pair<double,double> _w77, _w122, _w169, _w117, _wEnergy;
+ pair<CounterPtr,CounterPtr> _w77, _w122, _w169, _w117, _wEnergy;
//@}
};
DECLARE_RIVET_PLUGIN(H1_1994_S2919893);
}
diff --git a/analyses/pluginHERA/H1_1995_S3167097.cc b/analyses/pluginHERA/H1_1995_S3167097.cc
--- a/analyses/pluginHERA/H1_1995_S3167097.cc
+++ b/analyses/pluginHERA/H1_1995_S3167097.cc
@@ -1,128 +1,128 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/DISFinalState.hh"
#include "Rivet/Projections/CentralEtHCM.hh"
namespace Rivet {
/// H1 energy flow in DIS
///
/// @todo Make histograms match those in HepData and use autobooking
///
/// @author Leif Lonnblad
/// @author Andy Buckley
class H1_1995_S3167097 : public Analysis {
public:
/// Constructor
H1_1995_S3167097()
: Analysis("H1_1995_S3167097")
{ }
/// @name Analysis methods
//@{
void init() {
// Projections
const DISKinematics& diskin = declare(DISKinematics(), "Kinematics");
const DISFinalState& fshcm = declare(DISFinalState(diskin, DISFinalState::HCM), "FS");
declare(CentralEtHCM(fshcm), "Y1HCM");
// Histograms
/// @todo Convert to use autobooking and correspond to HepData data tables
- _sumw.resize(9);
+
_hEtFlow.resize(9);
for (size_t i = 0; i < 9; ++i) {
book(_sumw[i], "sumW_" + to_str(i));
book(_hEtFlow[i] ,to_str(i), 24, -6, 6);
}
book(_tmphAvEt, "TMP/hAvEt", 9, 1.0, 10.0);
book(_tmphAvX , "TMP/hAvX", 9, 1.0, 10.0);
book(_tmphAvQ2, "TMP/hAvQ2", 9, 1.0, 10.0);
book(_tmphN , "TMP/hN", 9, 1.0, 10.0);
}
/// Calculate the bin number from the DISKinematics projection
/// @todo Convert to use a HEPUtils Binning1D
size_t _getbin(const DISKinematics& dk) {
if (inRange(dk.Q2()/GeV2, 5.0, 10.0)) {
if (inRange(dk.x(), 1e-4, 2e-4)) return 0;
if (inRange(dk.x(), 2e-4, 5e-4) && dk.Q2() > 6.0*GeV2) return 1;
} else if (inRange(dk.Q2()/GeV2, 10.0, 20.0)) {
if (inRange(dk.x(), 2e-4, 5e-4)) return 2;
if (inRange(dk.x(), 5e-4, 8e-4)) return 3;
if (inRange(dk.x(), 8e-4, 1.5e-3)) return 4;
if (inRange(dk.x(), 1.5e-3, 4e-3)) return 5;
} else if (inRange(dk.Q2()/GeV2, 20.0, 50.0)) {
if (inRange(dk.x(), 5e-4, 1.4e-3)) return 6;
if (inRange(dk.x(), 1.4e-3, 3e-3)) return 7;
if (inRange(dk.x(), 3e-3, 1e-2)) return 8;
}
return -1;
}
void analyze(const Event& event) {
const FinalState& fs = apply<FinalState>(event, "FS");
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
const CentralEtHCM& y1 = apply<CentralEtHCM>(event, "Y1HCM");
const int ibin = _getbin(dk);
if (ibin < 0) vetoEvent;
_sumw[ibin]->fill();
for (size_t i = 0, N = fs.particles().size(); i < N; ++i) {
const double rap = fs.particles()[i].rapidity();
const double et = fs.particles()[i].Et();
_hEtFlow[ibin]->fill(rap, et/GeV);
}
/// @todo Use fillBin?
_tmphAvEt->fill(ibin + 1.5, y1.sumEt()/GeV);
_tmphAvX->fill(ibin + 1.5, dk.x());
_tmphAvQ2->fill(ibin + 1.5, dk.Q2()/GeV2);
_tmphN->fill(ibin + 1.5);
}
void finalize() {
for (size_t ibin = 0; ibin < 9; ++ibin)
scale(_hEtFlow[ibin], 0.5/_sumw[ibin]);
/// @todo Improve this!
Scatter2DPtr s21,s22,s23;
divide(_tmphAvEt,_tmphN,s21);
book(s21, "21");
divide(_tmphAvX,_tmphN,s22);
book(s22, "22");
divide(_tmphAvQ2,_tmphN,s23);
book(s23, "23");
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvEt/_tmphN, histoPath("21")) );
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvX/_tmphN, histoPath("22")) );
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvQ2/_tmphN, histoPath("23")) );
}
//@}
private:
/// Histograms for the \f$ E_T \f$ flow
vector<Histo1DPtr> _hEtFlow;
/// Temporary histograms for averages in different kinematical bins.
Histo1DPtr _tmphAvEt, _tmphAvX, _tmphAvQ2, _tmphN;
/// Weights counters for each kinematic bin
- vector<CounterPtr> _sumw;
+ array<CounterPtr, 9> _sumw;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(H1_1995_S3167097);
}
diff --git a/analyses/pluginHERA/H1_2000_S4129130.cc b/analyses/pluginHERA/H1_2000_S4129130.cc
--- a/analyses/pluginHERA/H1_2000_S4129130.cc
+++ b/analyses/pluginHERA/H1_2000_S4129130.cc
@@ -1,259 +1,258 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/DISKinematics.hh"
namespace Rivet {
/// @brief H1 energy flow and charged particle spectra
///
/// @author Peter Richardson
///
/// Based on the HZTOOL analysis HZ99091
class H1_2000_S4129130 : public Analysis {
public:
/// Constructor
H1_2000_S4129130()
: Analysis("H1_2000_S4129130")
{ }
/// @name Analysis methods
//@{
/// Initialise projections and histograms
void init() {
// Projections
declare(DISLepton(), "Lepton");
declare(DISKinematics(), "Kinematics");
declare(FinalState(), "FS");
// Histos
Histo1DPtr h;
// Histograms and weight vectors for low Q^2 a
for (size_t ix = 0; ix < 17; ++ix) {
book(h ,ix+1, 1, 1);
_histETLowQa.push_back(h);
- _weightETLowQa.push_back(0.);
+ book(_weightETLowQa[ix], "TMP/ETLowQa");
}
// Histograms and weight vectors for high Q^2 a
for (size_t ix = 0; ix < 7; ++ix) {
book(h ,ix+18, 1, 1);
_histETHighQa.push_back(h);
- _weightETHighQa.push_back(0.);
+ book(_weightETHighQa[ix], "TMP/ETHighQa");
}
// Histograms and weight vectors for low Q^2 b
for (size_t ix = 0; ix < 5; ++ix) {
book(h ,ix+25, 1, 1);
_histETLowQb.push_back(h);
- _weightETLowQb.push_back(0.);
+ book(_weightETLowQb[ix], "TMP/ETLowQb");
}
// Histograms and weight vectors for high Q^2 b
for (size_t ix = 0; ix < 3; ++ix) {
book(h ,30+ix, 1, 1);
_histETHighQb.push_back(h);
- _weightETHighQb.push_back(0.0);
+ book(_weightETHighQb[ix], "TMP/ETHighQb");
}
// Histograms for the averages
book(_histAverETCentral ,33, 1, 1);
book(_histAverETFrag ,34, 1, 1);
}
/// Analyze each event
void analyze(const Event& event) {
// DIS kinematics
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
double q2 = dk.Q2();
double x = dk.x();
double y = dk.y();
double w2 = dk.W2();
// Kinematics of the scattered lepton
const DISLepton& dl = apply<DISLepton>(event,"Lepton");
const FourMomentum leptonMom = dl.out();
const double enel = leptonMom.E();
const double thel = 180 - leptonMom.angle(dl.in().mom())/degree;
// Extract the particles other than the lepton
const FinalState& fs = apply<FinalState>(event, "FS");
Particles particles; particles.reserve(fs.size());
const GenParticle* dislepGP = dl.out().genParticle(); ///< @todo Is the GenParticle stuff necessary? (Not included in Particle::==?)
foreach (const Particle& p, fs.particles()) {
const GenParticle* loopGP = p.genParticle();
if (loopGP == dislepGP) continue;
particles.push_back(p);
}
// Cut on the forward energy
double efwd = 0.;
foreach (const Particle& p, particles) {
const double th = 180 - p.angle(dl.in())/degree;
if (inRange(th, 4.4, 15.0)) efwd += p.E();
}
// There are four possible selections for events
bool evcut[4];
// Low Q2 selection a
evcut[0] = enel/GeV > 12. && w2 >= 4400.*GeV2 && efwd/GeV > 0.5 && inRange(thel,157.,176.);
// Low Q2 selection b
evcut[1] = enel/GeV > 12. && inRange(y,0.3,0.5);
// High Q2 selection a
evcut[2] = inRange(thel,12.,150.) && inRange(y,0.05,0.6) && w2 >= 4400.*GeV2 && efwd > 0.5;
// High Q2 selection b
evcut[3] = inRange(thel,12.,150.) && inRange(y,0.05,0.6) && inRange(w2,27110.*GeV2,45182.*GeV2);
// Veto if fails all cuts
/// @todo Can we use all()?
if (! (evcut[0] || evcut[1] || evcut[2] || evcut[3]) ) vetoEvent;
// Find the bins
int bin[4] = {-1,-1,-1,-1};
// For the low Q2 selection a)
if (q2 > 2.5*GeV && q2 <= 5.*GeV) {
if (x > 0.00005 && x <= 0.0001 ) bin[0] = 0;
if (x > 0.0001 && x <= 0.0002 ) bin[0] = 1;
if (x > 0.0002 && x <= 0.00035) bin[0] = 2;
if (x > 0.00035 && x <= 0.0010 ) bin[0] = 3;
}
else if (q2 > 5.*GeV && q2 <= 10.*GeV) {
if (x > 0.0001 && x <= 0.0002 ) bin[0] = 4;
if (x > 0.0002 && x <= 0.00035) bin[0] = 5;
if (x > 0.00035 && x <= 0.0007 ) bin[0] = 6;
if (x > 0.0007 && x <= 0.0020 ) bin[0] = 7;
}
else if (q2 > 10.*GeV && q2 <= 20.*GeV) {
if (x > 0.0002 && x <= 0.0005) bin[0] = 8;
if (x > 0.0005 && x <= 0.0008) bin[0] = 9;
if (x > 0.0008 && x <= 0.0015) bin[0] = 10;
if (x > 0.0015 && x <= 0.040 ) bin[0] = 11;
}
else if (q2 > 20.*GeV && q2 <= 50.*GeV) {
if (x > 0.0005 && x <= 0.0014) bin[0] = 12;
if (x > 0.0014 && x <= 0.0030) bin[0] = 13;
if (x > 0.0030 && x <= 0.0100) bin[0] = 14;
}
else if (q2 > 50.*GeV && q2 <= 100.*GeV) {
if (x >0.0008 && x <= 0.0030) bin[0] = 15;
if (x >0.0030 && x <= 0.0200) bin[0] = 16;
}
// check in one of the bins
evcut[0] &= bin[0] >= 0;
// For the low Q2 selection b)
if (q2 > 2.5*GeV && q2 <= 5. *GeV) bin[1] = 0;
if (q2 > 5. *GeV && q2 <= 10. *GeV) bin[1] = 1;
if (q2 > 10.*GeV && q2 <= 20. *GeV) bin[1] = 2;
if (q2 > 20.*GeV && q2 <= 50. *GeV) bin[1] = 3;
if (q2 > 50.*GeV && q2 <= 100.*GeV) bin[1] = 4;
// check in one of the bins
evcut[1] &= bin[1] >= 0;
// for the high Q2 selection a)
if (q2 > 100.*GeV && q2 <= 400.*GeV) {
if (x > 0.00251 && x <= 0.00631) bin[2] = 0;
if (x > 0.00631 && x <= 0.0158 ) bin[2] = 1;
if (x > 0.0158 && x <= 0.0398 ) bin[2] = 2;
}
else if (q2 > 400.*GeV && q2 <= 1100.*GeV) {
if (x > 0.00631 && x <= 0.0158 ) bin[2] = 3;
if (x > 0.0158 && x <= 0.0398 ) bin[2] = 4;
if (x > 0.0398 && x <= 1. ) bin[2] = 5;
}
else if (q2 > 1100.*GeV && q2 <= 100000.*GeV) {
if (x > 0. && x <= 1.) bin[2] = 6;
}
// check in one of the bins
evcut[2] &= bin[2] >= 0;
// for the high Q2 selection b)
if (q2 > 100.*GeV && q2 <= 220.*GeV) bin[3] = 0;
else if (q2 > 220.*GeV && q2 <= 400.*GeV) bin[3] = 1;
else if (q2 > 400.*GeV ) bin[3] = 2;
// check in one of the bins
evcut[3] &= bin[3] >= 0;
// Veto if fails all cuts after bin selection
/// @todo Can we use all()?
if (! (evcut[0] || evcut[1] || evcut[2] || evcut[3])) vetoEvent;
// Increment the count for normalisation
- const double weight = 1.0;
- if (evcut[0]) _weightETLowQa [bin[0]] += weight;
- if (evcut[1]) _weightETLowQb [bin[1]] += weight;
- if (evcut[2]) _weightETHighQa[bin[2]] += weight;
- if (evcut[3]) _weightETHighQb[bin[3]] += weight;
+ if (evcut[0]) _weightETLowQa [bin[0]]->fill();
+ if (evcut[1]) _weightETLowQb [bin[1]]->fill();
+ if (evcut[2]) _weightETHighQa[bin[2]]->fill();
+ if (evcut[3]) _weightETHighQb[bin[3]]->fill();
// Boost to hadronic CoM
const LorentzTransform hcmboost = dk.boostHCM();
// Loop over the particles
double etcent = 0;
double etfrag = 0;
foreach (const Particle& p, particles) {
// Boost momentum to CMS
const FourMomentum hcmMom = hcmboost.transform(p.momentum());
double et = fabs(hcmMom.Et());
double eta = hcmMom.eta();
// Averages in central and forward region
if (fabs(eta) < .5 ) etcent += et;
if (eta > 2 && eta <= 3.) etfrag += et;
// Histograms of Et flow
- if (evcut[0]) _histETLowQa [bin[0]]->fill(eta, et*weight);
- if (evcut[1]) _histETLowQb [bin[1]]->fill(eta, et*weight);
- if (evcut[2]) _histETHighQa[bin[2]]->fill(eta, et*weight);
- if (evcut[3]) _histETHighQb[bin[3]]->fill(eta, et*weight);
+ if (evcut[0]) _histETLowQa [bin[0]]->fill(eta, et);
+ if (evcut[1]) _histETLowQb [bin[1]]->fill(eta, et);
+ if (evcut[2]) _histETHighQa[bin[2]]->fill(eta, et);
+ if (evcut[3]) _histETHighQb[bin[3]]->fill(eta, et);
}
// Fill histograms for the average quantities
if (evcut[1] || evcut[3]) {
- _histAverETCentral->fill(q2, etcent, weight);
- _histAverETFrag ->fill(q2, etfrag, weight);
+ _histAverETCentral->fill(q2, etcent);
+ _histAverETFrag ->fill(q2, etfrag);
}
}
// Finalize
void finalize() {
// Normalization of the Et distributions
/// @todo Simplify by using normalize() instead? Are all these being normalized to area=1?
for (size_t ix = 0; ix < 17; ++ix) if (_weightETLowQa[ix] != 0) scale(_histETLowQa[ix], 1/_weightETLowQa[ix]);
for (size_t ix = 0; ix < 7; ++ix) if (_weightETHighQa[ix] != 0) scale(_histETHighQa[ix], 1/_weightETHighQa[ix]);
for (size_t ix = 0; ix < 5; ++ix) if (_weightETLowQb[ix] != 0) scale(_histETLowQb[ix], 1/_weightETLowQb[ix]);
for (size_t ix = 0; ix < 3; ++ix) if (_weightETHighQb[ix] != 0) scale(_histETHighQb[ix], 1/_weightETHighQb[ix]);
}
//@}
private:
/// @name Histograms
//@{
vector<Histo1DPtr> _histETLowQa;
vector<Histo1DPtr> _histETHighQa;
vector<Histo1DPtr> _histETLowQb;
vector<Histo1DPtr> _histETHighQb;
Profile1DPtr _histAverETCentral;
Profile1DPtr _histAverETFrag;
//@}
/// @name storage of weights for normalisation
//@{
- vector<double> _weightETLowQa;
- vector<double> _weightETHighQa;
- vector<double> _weightETLowQb;
- vector<double> _weightETHighQb;
+ array<CounterPtr,17> _weightETLowQa;
+ array<CounterPtr, 7> _weightETHighQa;
+ array<CounterPtr, 5> _weightETLowQb;
+ array<CounterPtr, 3> _weightETHighQb;
//@}
};
DECLARE_RIVET_PLUGIN(H1_2000_S4129130);
}
diff --git a/analyses/pluginHERA/ZEUS_2001_S4815815.cc b/analyses/pluginHERA/ZEUS_2001_S4815815.cc
--- a/analyses/pluginHERA/ZEUS_2001_S4815815.cc
+++ b/analyses/pluginHERA/ZEUS_2001_S4815815.cc
@@ -1,184 +1,183 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/DISKinematics.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief ZEUS dijet photoproduction study used in the ZEUS jets PDF fit
///
/// This class is a reproduction of the HZTool routine for the ZEUS
/// dijet photoproduction paper which was used in the ZEUS jets PDF fit.
///
/// @note Cleaning cuts on event pT/sqrt(Et) and y_e are not needed in MC analysis.
///
/// @author Andy Buckley
/// @author Ilkka Helenius
class ZEUS_2001_S4815815 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ZEUS_2001_S4815815);
/// @name Analysis methods
//@{
// Book projections and histograms
void init() {
/// @todo Acceptance
FinalState fs;
declare(FastJets(fs, FastJets::KT, 1.0), "Jets"); //< R=1 checked with Matt Wing
// Projections
declare(DISLepton(), "Lepton");
// Table 1
book(_h_costh[0] ,1, 1, 1);
book(_h_costh[1] ,1, 1, 2);
// Table 2
book(_h_etjet1[1][0] ,2, 1, 1);
book(_h_etjet1[1][1] ,3, 1, 1);
book(_h_etjet1[1][2] ,4, 1, 1);
book(_h_etjet1[1][3] ,5, 1, 1);
book(_h_etjet1[1][4] ,6, 1, 1);
book(_h_etjet1[1][5] ,7, 1, 1);
// Table 3
book(_h_etjet1[0][0] ,8, 1, 1);
book(_h_etjet1[0][1] ,9, 1, 1);
book(_h_etjet1[0][2] ,10, 1, 1);
book(_h_etjet1[0][3] ,11, 1, 1);
book(_h_etjet1[0][4] ,12, 1, 1);
book(_h_etjet1[0][5] ,13, 1, 1);
// Table 4
book(_h_etajet2[1][0] ,14, 1, 1);
book(_h_etajet2[1][1] ,15, 1, 1);
book(_h_etajet2[1][2] ,16, 1, 1);
// Table 5
book(_h_etajet2[0][0] ,17, 1, 1);
book(_h_etajet2[0][1] ,18, 1, 1);
book(_h_etajet2[0][2] ,19, 1, 1);
// Table 6
book(_h_xobsy[0] ,20, 1, 1);
book(_h_xobsy[1] ,21, 1, 1);
book(_h_xobsy[2] ,22, 1, 1);
book(_h_xobsy[3] ,23, 1, 1);
}
// Do the analysis
void analyze(const Event& event) {
// Determine event orientation, since coord system is for +z = proton direction
const ParticlePair bs = event.beams();
if (bs.first.pid() != PID::POSITRON && bs.second.pid() != PID::POSITRON) vetoEvent;
const Particle& bpositron = (bs.first.pid() == PID::POSITRON ? bs.first : bs.second);
if (bs.first.pid() != PID::PROTON && bs.second.pid() != PID::PROTON) vetoEvent;
const Particle& bproton = (bs.first.pid() == PID::PROTON) ? bs.first : bs.second;
const int orientation = sign(bproton.momentum().pz());
MSG_DEBUG("Beam proton = " << bproton.mom() << " GeV => orientation = " << orientation);
// Jet selection
const Jets jets = apply<FastJets>(event, "Jets") \
.jets(Cuts::Et > 11*GeV && Cuts::etaIn(-1*orientation, 2.4*orientation), cmpMomByEt);
MSG_DEBUG("Jet multiplicity = " << jets.size());
if (jets.size() < 2) vetoEvent;
const Jet& j1 = jets[0];
const Jet& j2 = jets[1];
if (j1.Et() < 14*GeV) vetoEvent;
// eta and cos(theta*) computation
const double eta1 = orientation*j1.eta(), eta2 = orientation*j2.eta();
const double etabar = (eta1 + eta2)/2;
const double etadiff = eta1 - eta2;
const double costhetastar = tanh(etadiff/2);
// Calculate the photon 4-vector from the incoming and outgoing lepton.
const DISLepton& leptons = apply<DISLepton>(event,"Lepton");
const FourMomentum qleptonIn = leptons.in();
const FourMomentum qleptonOut = leptons.out();
const FourMomentum qphoton = qleptonIn - qleptonOut;
// Computation and cut on inelasticity
const double inelasticity = dot(bproton.mom(), qphoton) / dot(bproton.mom(), bpositron.mom());
if (!inRange(inelasticity, 0.2, 0.85)) vetoEvent;
// Computation of x_y^obs
// (I assume Ee is the lab frame positron momentum, not in proton rest frame cf. the ambiguous phrase in the paper)
const double xyobs = (j1.Et() * exp(-eta1) + j2.Et() * exp(-eta2)) / (2*inelasticity*bpositron.E());
const size_t i_xyobs = (xyobs < 0.75) ? 0 : 1;
// Calculate the invariant mass of the dijet as in the paper
const double mjj = sqrt( 2.*j1.Et()*j2.Et()*( cosh(j1.eta() - j2.eta()) - cos(j1.phi() - j2.phi()) ) );
// Fill histograms
- const double weight = 1.0;
// T1
if (mjj > 42*GeV && inRange(etabar, 0.1, 1.3))
- _h_costh[i_xyobs]->fill(abs(costhetastar), weight);
+ _h_costh[i_xyobs]->fill(abs(costhetastar));
// T2, T3: Symmetrize eta selection, each event contribute twice to the cross section
for (size_t isel = 0; isel < 2; ++isel) {
double etaJet1 = (isel == 0) ? orientation*j1.eta() : orientation*j2.eta();
double etaJet2 = (isel == 0) ? orientation*j2.eta() : orientation*j1.eta();
if (inRange(etaJet1, -1, 0) && inRange(etaJet2, -1, 0))
- _h_etjet1[i_xyobs][0]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][0]->fill(j1.Et()/GeV);
else if (inRange(etaJet1, 0, 1) && inRange(etaJet2, -1, 0))
- _h_etjet1[i_xyobs][1]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][1]->fill(j1.Et()/GeV);
else if (inRange(etaJet1, 0, 1) && inRange(etaJet2, 0, 1))
- _h_etjet1[i_xyobs][2]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][2]->fill(j1.Et()/GeV);
else if (inRange(etaJet1, 1, 2.4) && inRange(etaJet2, -1, 0))
- _h_etjet1[i_xyobs][3]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][3]->fill(j1.Et()/GeV);
else if (inRange(etaJet1, 1, 2.4) && inRange(etaJet2, 0, 1))
- _h_etjet1[i_xyobs][4]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][4]->fill(j1.Et()/GeV);
else if (inRange(etaJet1, 1, 2.4) && inRange(etaJet2, 1, 2.4))
- _h_etjet1[i_xyobs][5]->fill(j1.Et()/GeV, weight);
+ _h_etjet1[i_xyobs][5]->fill(j1.Et()/GeV);
// T4, T5
if (inRange(etaJet1, -1, 0))
- _h_etajet2[i_xyobs][0]->fill(etaJet2, weight);
+ _h_etajet2[i_xyobs][0]->fill(etaJet2);
else if (inRange(etaJet1, 0, 1))
- _h_etajet2[i_xyobs][1]->fill(etaJet2, weight);
+ _h_etajet2[i_xyobs][1]->fill(etaJet2);
else if (inRange(etaJet1, 1, 2.4))
- _h_etajet2[i_xyobs][2]->fill(etaJet2, weight);
+ _h_etajet2[i_xyobs][2]->fill(etaJet2);
}
// T6
if (inRange(j1.Et()/GeV, 14, 17))
- _h_xobsy[0]->fill(xyobs, weight);
+ _h_xobsy[0]->fill(xyobs);
else if (inRange(j1.Et()/GeV, 17, 25))
- _h_xobsy[1]->fill(xyobs, weight);
+ _h_xobsy[1]->fill(xyobs);
else if (inRange(j1.Et()/GeV, 25, 35))
- _h_xobsy[2]->fill(xyobs, weight);
+ _h_xobsy[2]->fill(xyobs);
else if (inRange(j1.Et()/GeV, 35, 90))
- _h_xobsy[3]->fill(xyobs, weight);
+ _h_xobsy[3]->fill(xyobs);
}
// Finalize
void finalize() {
const double sf = crossSection()/picobarn/sumOfWeights();
for (size_t ix = 0; ix < 2; ++ix) {
scale(_h_costh[ix], sf);
for (auto& h : _h_etjet1[ix]) scale(h, sf);
for (auto& h : _h_etajet2[ix]) scale(h, sf);
}
for (auto& h : _h_xobsy) scale(h, sf);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_costh[2], _h_etjet1[2][6], _h_etajet2[2][3], _h_xobsy[4];
//@}
};
DECLARE_RIVET_PLUGIN(ZEUS_2001_S4815815);
}
diff --git a/analyses/pluginLHCb/LHCB_2010_I867355.cc b/analyses/pluginLHCb/LHCB_2010_I867355.cc
--- a/analyses/pluginLHCb/LHCB_2010_I867355.cc
+++ b/analyses/pluginLHCb/LHCB_2010_I867355.cc
@@ -1,90 +1,88 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Particle.hh"
namespace Rivet {
class LHCB_2010_I867355 : public Analysis {
public:
LHCB_2010_I867355() : Analysis("LHCB_2010_I867355")
{ }
void init() {
//@ Results are presented for two different fragmentation functions, LEP and Tevatron. Therefore, we have two sets of histograms.
book(_h_sigma_vs_eta_lep ,1, 1, 1);
book(_h_sigma_vs_eta_tvt ,1, 1, 2);
book(_h_sigma_total_lep ,2, 1, 1);
book(_h_sigma_total_tvt ,2, 1, 2);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = 1.0;
-
Particles bhadrons;
foreach (const GenParticle* p, particles(event.genEvent())) {
if (!( PID::isHadron( p->pdg_id() ) && PID::hasBottom( p->pdg_id() )) ) continue;
const GenVertex* dv = p->end_vertex();
bool hasBdaughter = false;
if ( PID::isHadron( p->pdg_id() ) && PID::hasBottom( p->pdg_id() )) { // selecting b-hadrons
if (dv) {
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin() ; pp != dv->particles_out_const_end() ; ++pp) {
if (PID::isHadron( (*pp)->pdg_id() ) && PID::hasBottom( (*pp)->pdg_id() )) {
hasBdaughter = true;
}
}
}
}
if (hasBdaughter) continue; // continue if the daughter is another b-hadron
bhadrons += Particle(*p);
}
foreach (const Particle& particle, bhadrons) {
// take fabs() to use full statistics and then multiply weight by 0.5 because LHCb is single-sided
double eta = fabs(particle.eta());
- _h_sigma_vs_eta_lep->fill( eta, 0.5*weight );
- _h_sigma_vs_eta_tvt->fill( eta, 0.5*weight );
+ _h_sigma_vs_eta_lep->fill( eta, 0.5 );
+ _h_sigma_vs_eta_tvt->fill( eta, 0.5 );
- _h_sigma_total_lep->fill( eta, 0.5*weight ); // histogram for full kinematic range
- _h_sigma_total_tvt->fill( eta, 0.5*weight ); // histogram for full kinematic range
+ _h_sigma_total_lep->fill( eta, 0.5 ); // histogram for full kinematic range
+ _h_sigma_total_tvt->fill( eta, 0.5 ); // histogram for full kinematic range
}
}
void finalize() {
double norm = crossSection()/microbarn/sumOfWeights();
double binwidth = 4.; // integrated over full rapidity space from 2 to 6.
// to get the average of b and bbar, we scale with 0.5
scale(_h_sigma_vs_eta_lep, 0.5*norm);
scale(_h_sigma_vs_eta_tvt, 0.5*norm);
scale(_h_sigma_total_lep, 0.5*norm*binwidth);
scale(_h_sigma_total_tvt, 0.5*norm*binwidth);
}
private:
Histo1DPtr _h_sigma_total_lep;
Histo1DPtr _h_sigma_total_tvt;
Histo1DPtr _h_sigma_vs_eta_lep;
Histo1DPtr _h_sigma_vs_eta_tvt;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2010_I867355);
}
diff --git a/analyses/pluginLHCb/LHCB_2010_S8758301.cc b/analyses/pluginLHCb/LHCB_2010_S8758301.cc
--- a/analyses/pluginLHCb/LHCB_2010_S8758301.cc
+++ b/analyses/pluginLHCb/LHCB_2010_S8758301.cc
@@ -1,341 +1,554 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Math/Units.hh"
#include "HepMC/GenEvent.h"
#include "HepMC/GenParticle.h"
#include "HepMC/GenVertex.h"
#include "HepMC/SimpleVector.h"
namespace Rivet {
using namespace HepMC;
-using namespace std;
// Lifetime cut: longest living ancestor ctau < 10^-11 [m]
namespace {
const double MAX_CTAU = 1.0E-11; // [m]
const double MIN_PT = 0.0001; // [GeV/c]
}
class LHCB_2010_S8758301 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2010_S8758301()
: Analysis("LHCB_2010_S8758301"),
- sumKs0_30(0.0), sumKs0_35(0.0),
- sumKs0_40(0.0), sumKs0_badnull(0),
+ sumKs0_badnull(0),
sumKs0_badlft(0), sumKs0_all(0),
sumKs0_outup(0), sumKs0_outdwn(0),
sum_low_pt_loss(0), sum_high_pt_loss(0)
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
MSG_DEBUG("Initializing analysis!");
fillMap(partLftMap);
book(_h_K0s_pt_30 ,1,1,1);
book(_h_K0s_pt_35 ,1,1,2);
book(_h_K0s_pt_40 ,1,1,3);
book(_h_K0s_pt_y_30 ,2,1,1);
book(_h_K0s_pt_y_35 ,2,1,2);
book(_h_K0s_pt_y_40 ,2,1,3);
book(_h_K0s_pt_y_all ,3,1,1);
+
+ book(sumKs0_30, "TMP/sumKs0_30");
+ book(sumKs0_35, "TMP/sumKs0_35");
+ book(sumKs0_40, "TMP/sumKs0_40");
declare(UnstableFinalState(), "UFS");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
int id;
double y, pT;
- const double weight = 1.0;
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
double ancestor_lftime;
foreach (const Particle& p, ufs.particles()) {
id = p.pid();
if ((id != 310) && (id != -310)) continue;
sumKs0_all ++;
ancestor_lftime = 0.;
const GenParticle* long_ancestor = getLongestLivedAncestor(p, ancestor_lftime);
if ( !(long_ancestor) ) {
sumKs0_badnull ++;
continue;
}
if ( ancestor_lftime > MAX_CTAU ) {
sumKs0_badlft ++;
MSG_DEBUG("Ancestor " << long_ancestor->pdg_id() << ", ctau: " << ancestor_lftime << " [m]");
continue;
}
const FourMomentum& qmom = p.momentum();
y = 0.5 * log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz()));
pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py()));
if (pT < MIN_PT) {
sum_low_pt_loss ++;
MSG_DEBUG("Small pT K^0_S: " << pT << " GeV/c.");
}
if (pT > 1.6) {
sum_high_pt_loss ++;
}
if (y > 2.5 && y < 4.0) {
- _h_K0s_pt_y_all->fill(pT, weight);
+ _h_K0s_pt_y_all->fill(pT);
if (y > 2.5 && y < 3.0) {
- _h_K0s_pt_y_30->fill(pT, weight);
- _h_K0s_pt_30->fill(pT, weight);
- sumKs0_30 += weight;
+ _h_K0s_pt_y_30->fill(pT);
+ _h_K0s_pt_30->fill(pT);
+ sumKs0_30->fill();
} else if (y > 3.0 && y < 3.5) {
- _h_K0s_pt_y_35->fill(pT, weight);
- _h_K0s_pt_35->fill(pT, weight);
- sumKs0_35 += weight;
+ _h_K0s_pt_y_35->fill(pT);
+ _h_K0s_pt_35->fill(pT);
+ sumKs0_35->fill();
} else if (y > 3.5 && y < 4.0) {
- _h_K0s_pt_y_40->fill(pT, weight);
- _h_K0s_pt_40->fill(pT, weight);
- sumKs0_40 += weight;
+ _h_K0s_pt_y_40->fill(pT);
+ _h_K0s_pt_40->fill(pT);
+ sumKs0_40->fill();
}
} else if (y < 2.5) {
sumKs0_outdwn ++;
} else if (y > 4.0) {
sumKs0_outup ++;
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
MSG_DEBUG("Total number Ks0: " << sumKs0_all << endl
<< "Sum of weights: " << sumOfWeights() << endl
- << "Weight Ks0 (2.5 < y < 3.0): " << sumKs0_30 << endl
- << "Weight Ks0 (3.0 < y < 3.5): " << sumKs0_35 << endl
- << "Weight Ks0 (3.5 < y < 4.0): " << sumKs0_40 << endl
+ << "Weight Ks0 (2.5 < y < 3.0): " << double(sumKs0_30) << endl
+ << "Weight Ks0 (3.0 < y < 3.5): " << double(sumKs0_35) << endl
+ << "Weight Ks0 (3.5 < y < 4.0): " << double(sumKs0_40) << endl
<< "Nb. unprompt Ks0 [null mother]: " << sumKs0_badnull << endl
<< "Nb. unprompt Ks0 [mother lifetime exceeded]: " << sumKs0_badlft << endl
<< "Nb. Ks0 (y > 4.0): " << sumKs0_outup << endl
<< "Nb. Ks0 (y < 2.5): " << sumKs0_outdwn << endl
<< "Nb. Ks0 (pT < " << (MIN_PT/MeV) << " MeV/c): " << sum_low_pt_loss << endl
<< "Nb. Ks0 (pT > 1.6 GeV/c): " << sum_high_pt_loss << endl
<< "Cross-section [mb]: " << crossSection()/millibarn << endl
<< "Nb. events: " << numEvents());
// Compute cross-section; multiply by bin width for correct scaling
// cross-section given by Rivet in pb
double xsection_factor = crossSection()/sumOfWeights();
// Multiply by bin width for correct scaling, xsection in mub
scale(_h_K0s_pt_30, 0.2*xsection_factor/microbarn);
scale(_h_K0s_pt_35, 0.2*xsection_factor/microbarn);
scale(_h_K0s_pt_40, 0.2*xsection_factor/microbarn);
// Divide by dy (rapidity window width), xsection in mb
scale(_h_K0s_pt_y_30, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_35, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_40, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_all, xsection_factor/1.5/millibarn);
}
//@}
private:
/// Get particle lifetime from hardcoded data
double getLifeTime(int pid) {
double lft = -1.0;
if (pid < 0) pid = - pid;
// Correct Pythia6 PIDs for f0(980), f0(1370) mesons
if (pid == 10331) pid = 30221;
if (pid == 10221) pid = 9010221;
map<int, double>::iterator pPartLft = partLftMap.find(pid);
// search stable particle list
if (pPartLft == partLftMap.end()) {
if (pid <= 100) return 0.0;
- for (unsigned int i=0; i < sizeof(stablePDGIds)/sizeof(unsigned int); i++ ) {
- if (pid == stablePDGIds[i]) { lft = 0.0; break; }
+ for ( auto id : stablePDGIds ) {
+ if (pid == id) { lft = 0.0; break; }
}
} else {
lft = (*pPartLft).second;
}
if (lft < 0.0)
MSG_ERROR("Could not determine lifetime for particle with PID " << pid
<< "... This K_s^0 will be considered unprompt!");
return lft;
}
const GenParticle* getLongestLivedAncestor(const Particle& p, double& lifeTime) {
- const GenParticle* ret = NULL;
+ const GenParticle* ret = nullptr;
lifeTime = 1.;
- if (p.genParticle() == NULL) return NULL;
+ if (p.genParticle() == nullptr) return nullptr;
const GenParticle* pmother = p.genParticle();
double longest_ctau = 0.;
double mother_ctau;
int mother_pid, n_inparts;
const GenVertex* ivertex = pmother->production_vertex();
while (ivertex) {
n_inparts = ivertex->particles_in_size();
- if (n_inparts < 1) {ret = NULL; break;} // error: should never happen!
- const GenVertex::particles_in_const_iterator iPart_invtx = ivertex->particles_in_const_begin();
+ if (n_inparts < 1) {ret = nullptr; break;} // error: should never happen!
+ const auto iPart_invtx = ivertex->particles_in_const_begin();
pmother = (*iPart_invtx); // first mother particle
mother_pid = pmother->pdg_id();
ivertex = pmother->production_vertex(); // get next vertex
if ( (mother_pid == 2212) || (mother_pid <= 100) ) {
- if (ret == NULL) ret = pmother;
+ if (ret == nullptr) ret = pmother;
continue;
}
mother_ctau = getLifeTime(mother_pid);
- if (mother_ctau < 0.) { ret= NULL; break; } // error:should never happen!
+ if (mother_ctau < 0.) { ret= nullptr; break; } // error:should never happen!
if (mother_ctau > longest_ctau) {
longest_ctau = mother_ctau;
ret = pmother;
}
}
if (ret) lifeTime = longest_ctau * c_light;
return ret;
}
// Fill the PDG Id to Lifetime[seconds] map
// Data was extract from LHCb Particle Table using ParticleSvc
bool fillMap(map<int, double> &m) {
- m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16;
- m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16;
- m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26;
- m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12;
- m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24;
- m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08;
- m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24;
- m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 3.91793E-24;
- m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24;
- m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23;
- m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21;
- m[333] = 1.545099E-22; m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12;
- m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13;
- m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19;
- m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22;
- m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23;
- m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12;
- m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13;
- m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24;
- m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24;
- m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24;
- m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24;
- m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24;
- m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24;
- m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24;
- m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24;
- m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24;
- m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24;
- m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10;
- m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23;
- m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22;
- m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14;
- m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19;
- m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19;
- m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19;
- m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12;
- m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12;
- m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24;
- m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24;
- m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24;
- m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24;
- m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24;
- m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23;
- m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23;
- m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24;
- m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24;
- m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24;
- m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24;
- m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23;
- m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24;
- m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24;
- m[13224] = 1.09702E-23; m[13226] = 5.485102E-24;
- m[13312] = 4.135667E-22; m[13314] = 2.742551E-23;
- m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24;
- m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24;
- m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24;
- m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22;
- m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24;
- m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24;
- m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24;
- m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24;
- m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24;
- m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24;
- m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24;
- m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24;
- m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24;
- m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24;
- m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24;
- m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24;
- m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24;
- m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24;
- m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24;
- m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24;
- m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24;
- m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24;
- m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20;
- m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24;
- m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24;
- m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24;
- m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24;
- m[9020221] = 8.093281E-23; m[9020443] = 1.061633E-23;
- m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24;
- m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24;
- m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21;
+ m[6] = 4.707703E-25;
+ m[11] = 1.E+16;
+ m[12] = 1.E+16;
+ m[13] = 2.197019E-06;
+ m[14] = 1.E+16;
+ m[15] = 2.906E-13;
+ m[16] = 1.E+16;
+ m[22] = 1.E+16;
+ m[23] = 2.637914E-25;
+ m[24] = 3.075758E-25;
+ m[25] = 9.4E-26;
+ m[35] = 9.4E-26;
+ m[36] = 9.4E-26;
+ m[37] = 9.4E-26;
+ m[84] = 3.335641E-13;
+ m[85] = 1.290893E-12;
+ m[111] = 8.4E-17;
+ m[113] = 4.405704E-24;
+ m[115] = 6.151516E-24;
+ m[117] = 4.088275E-24;
+ m[119] = 2.102914E-24;
+ m[130] = 5.116E-08;
+ m[150] = 1.525E-12;
+ m[211] = 2.6033E-08;
+ m[213] = 4.405704E-24;
+ m[215] = 6.151516E-24;
+ m[217] = 4.088275E-24;
+ m[219] = 2.102914E-24;
+ m[221] = 5.063171E-19;
+ m[223] = 7.752794E-23;
+ m[225] = 3.555982E-24;
+ m[227] = 3.91793E-24;
+ m[229] = 2.777267E-24;
+ m[310] = 8.953E-11;
+ m[313] = 1.308573E-23;
+ m[315] = 6.038644E-24;
+ m[317] = 4.139699E-24;
+ m[319] = 3.324304E-24;
+ m[321] = 1.238E-08;
+ m[323] = 1.295693E-23;
+ m[325] = 6.682357E-24;
+ m[327] = 4.139699E-24;
+ m[329] = 3.324304E-24;
+ m[331] = 3.210791E-21;
+ m[333] = 1.545099E-22;
+ m[335] = 9.016605E-24;
+ m[337] = 7.565657E-24;
+ m[350] = 1.407125E-12;
+ m[411] = 1.04E-12;
+ m[413] = 6.856377E-21;
+ m[415] = 1.778952E-23;
+ m[421] = 4.101E-13;
+ m[423] = 1.000003E-19;
+ m[425] = 1.530726E-23;
+ m[431] = 5.E-13;
+ m[433] = 1.000003E-19;
+ m[435] = 3.291061E-23;
+ m[441] = 2.465214E-23;
+ m[443] = 7.062363E-21;
+ m[445] = 3.242425E-22;
+ m[510] = 1.525E-12;
+ m[511] = 1.525E-12;
+ m[513] = 1.000019E-19;
+ m[515] = 1.31E-23;
+ m[521] = 1.638E-12;
+ m[523] = 1.000019E-19;
+ m[525] = 1.31E-23;
+ m[530] = 1.536875E-12;
+ m[531] = 1.472E-12;
+ m[533] = 1.E-19;
+ m[535] = 1.31E-23;
+ m[541] = 4.5E-13;
+ m[553] = 1.218911E-20;
+ m[1112] = 4.539394E-24;
+ m[1114] = 5.578069E-24;
+ m[1116] = 1.994582E-24;
+ m[1118] = 2.269697E-24;
+ m[1212] = 4.539394E-24;
+ m[1214] = 5.723584E-24;
+ m[1216] = 1.994582E-24;
+ m[1218] = 1.316424E-24;
+ m[2112] = 8.857E+02;
+ m[2114] = 5.578069E-24;
+ m[2116] = 4.388081E-24;
+ m[2118] = 2.269697E-24;
+ m[2122] = 4.539394E-24;
+ m[2124] = 5.723584E-24;
+ m[2126] = 1.994582E-24;
+ m[2128] = 1.316424E-24;
+ m[2212] = 1.E+16;
+ m[2214] = 5.578069E-24;
+ m[2216] = 4.388081E-24;
+ m[2218] = 2.269697E-24;
+ m[2222] = 4.539394E-24;
+ m[2224] = 5.578069E-24;
+ m[2226] = 1.994582E-24;
+ m[2228] = 2.269697E-24;
+ m[3112] = 1.479E-10;
+ m[3114] = 1.670589E-23;
+ m[3116] = 5.485102E-24;
+ m[3118] = 3.656734E-24;
+ m[3122] = 2.631E-10;
+ m[3124] = 4.219309E-23;
+ m[3126] = 8.227653E-24;
+ m[3128] = 3.291061E-24;
+ m[3212] = 7.4E-20;
+ m[3214] = 1.828367E-23;
+ m[3216] = 5.485102E-24;
+ m[3218] = 3.656734E-24;
+ m[3222] = 8.018E-11;
+ m[3224] = 1.838582E-23;
+ m[3226] = 5.485102E-24;
+ m[3228] = 3.656734E-24;
+ m[3312] = 1.639E-10;
+ m[3314] = 6.648608E-23;
+ m[3322] = 2.9E-10;
+ m[3324] = 7.233101E-23;
+ m[3334] = 8.21E-11;
+ m[4112] = 2.991874E-22;
+ m[4114] = 4.088274E-23;
+ m[4122] = 2.E-13;
+ m[4132] = 1.12E-13;
+ m[4212] = 3.999999E-22;
+ m[4214] = 3.291061E-22;
+ m[4222] = 2.951624E-22;
+ m[4224] = 4.417531E-23;
+ m[4232] = 4.42E-13;
+ m[4332] = 6.9E-14;
+ m[4412] = 3.335641E-13;
+ m[4422] = 3.335641E-13;
+ m[4432] = 3.335641E-13;
+ m[5112] = 1.E-19;
+ m[5122] = 1.38E-12;
+ m[5132] = 1.42E-12;
+ m[5142] = 1.290893E-12;
+ m[5212] = 1.E-19;
+ m[5222] = 1.E-19;
+ m[5232] = 1.42E-12;
+ m[5242] = 1.290893E-12;
+ m[5312] = 1.E-19;
+ m[5322] = 1.E-19;
+ m[5332] = 1.55E-12;
+ m[5342] = 1.290893E-12;
+ m[5442] = 1.290893E-12;
+ m[5512] = 1.290893E-12;
+ m[5522] = 1.290893E-12;
+ m[5532] = 1.290893E-12;
+ m[5542] = 1.290893E-12;
+ m[10111] = 2.48382E-24;
+ m[10113] = 4.635297E-24;
+ m[10115] = 2.54136E-24;
+ m[10211] = 2.48382E-24;
+ m[10213] = 4.635297E-24;
+ m[10215] = 2.54136E-24;
+ m[10223] = 1.828367E-24;
+ m[10225] = 3.636531E-24;
+ m[10311] = 2.437823E-24;
+ m[10313] = 7.313469E-24;
+ m[10315] = 3.538775E-24;
+ m[10321] = 2.437823E-24;
+ m[10323] = 7.313469E-24;
+ m[10325] = 3.538775E-24;
+ m[10331] = 4.804469E-24;
+ m[10411] = 4.38E-24;
+ m[10413] = 3.29E-23;
+ m[10421] = 4.38E-24;
+ m[10423] = 3.22653E-23;
+ m[10431] = 6.5821E-22;
+ m[10433] = 6.5821E-22;
+ m[10441] = 6.453061E-23;
+ m[10511] = 4.39E-24;
+ m[10513] = 1.65E-23;
+ m[10521] = 4.39E-24;
+ m[10523] = 1.65E-23;
+ m[10531] = 4.39E-24;
+ m[10533] = 1.65E-23;
+ m[11114] = 2.194041E-24;
+ m[11116] = 1.828367E-24;
+ m[11212] = 1.880606E-24;
+ m[11216] = 1.828367E-24;
+ m[12112] = 2.194041E-24;
+ m[12114] = 2.194041E-24;
+ m[12116] = 5.063171E-24;
+ m[12126] = 1.828367E-24;
+ m[12212] = 2.194041E-24;
+ m[12214] = 2.194041E-24;
+ m[12216] = 5.063171E-24;
+ m[12224] = 2.194041E-24;
+ m[12226] = 1.828367E-24;
+ m[13112] = 6.582122E-24;
+ m[13114] = 1.09702E-23;
+ m[13116] = 5.485102E-24;
+ m[13122] = 1.316424E-23;
+ m[13124] = 1.09702E-23;
+ m[13126] = 6.928549E-24;
+ m[13212] = 6.582122E-24;
+ m[13214] = 1.09702E-23;
+ m[13216] = 5.485102E-24;
+ m[13222] = 6.582122E-24;
+ m[13224] = 1.09702E-23;
+ m[13226] = 5.485102E-24;
+ m[13312] = 4.135667E-22;
+ m[13314] = 2.742551E-23;
+ m[13324] = 2.742551E-23;
+ m[14122] = 1.828367E-22;
+ m[20022] = 1.E+16;
+ m[20113] = 1.567172E-24;
+ m[20213] = 1.567172E-24;
+ m[20223] = 2.708692E-23;
+ m[20313] = 3.782829E-24;
+ m[20315] = 2.384827E-24;
+ m[20323] = 3.782829E-24;
+ m[20325] = 2.384827E-24;
+ m[20333] = 1.198929E-23;
+ m[20413] = 2.63E-24;
+ m[20423] = 2.63E-24;
+ m[20433] = 6.5821E-22;
+ m[20443] = 7.395643E-22;
+ m[20513] = 2.63E-24;
+ m[20523] = 2.63E-24;
+ m[20533] = 2.63E-24;
+ m[21112] = 2.632849E-24;
+ m[21114] = 3.291061E-24;
+ m[21212] = 2.632849E-24;
+ m[21214] = 6.582122E-24;
+ m[22112] = 4.388081E-24;
+ m[22114] = 3.291061E-24;
+ m[22122] = 2.632849E-24;
+ m[22124] = 6.582122E-24;
+ m[22212] = 4.388081E-24;
+ m[22214] = 3.291061E-24;
+ m[22222] = 2.632849E-24;
+ m[22224] = 3.291061E-24;
+ m[23112] = 7.313469E-24;
+ m[23114] = 2.991874E-24;
+ m[23122] = 4.388081E-24;
+ m[23124] = 6.582122E-24;
+ m[23126] = 3.291061E-24;
+ m[23212] = 7.313469E-24;
+ m[23214] = 2.991874E-24;
+ m[23222] = 7.313469E-24;
+ m[23224] = 2.991874E-24;
+ m[30113] = 2.632849E-24;
+ m[30213] = 2.632849E-24;
+ m[30221] = 1.880606E-24;
+ m[30223] = 2.089563E-24;
+ m[30313] = 2.056913E-24;
+ m[30323] = 2.056913E-24;
+ m[30443] = 2.419898E-23;
+ m[31114] = 1.880606E-24;
+ m[31214] = 3.291061E-24;
+ m[32112] = 3.989164E-24;
+ m[32114] = 1.880606E-24;
+ m[32124] = 3.291061E-24;
+ m[32212] = 3.989164E-24;
+ m[32214] = 1.880606E-24;
+ m[32224] = 1.880606E-24;
+ m[33122] = 1.880606E-23;
+ m[42112] = 6.582122E-24;
+ m[42212] = 6.582122E-24;
+ m[43122] = 2.194041E-24;
+ m[53122] = 4.388081E-24;
+ m[100111] = 1.645531E-24;
+ m[100113] = 1.64553E-24;
+ m[100211] = 1.645531E-24;
+ m[100213] = 1.64553E-24;
+ m[100221] = 1.196749E-23;
+ m[100223] = 3.061452E-24;
+ m[100313] = 2.837122E-24;
+ m[100323] = 2.837122E-24;
+ m[100331] = 4.459432E-25;
+ m[100333] = 4.388081E-24;
+ m[100441] = 4.701516E-23;
+ m[100443] = 2.076379E-21;
+ m[100553] = 2.056913E-20;
+ m[200553] = 3.242425E-20;
+ m[300553] = 3.210791E-23;
+ m[9000111] = 8.776163E-24;
+ m[9000211] = 8.776163E-24;
+ m[9000443] = 8.227652E-24;
+ m[9000553] = 5.983747E-24;
+ m[9010111] = 3.164482E-24;
+ m[9010211] = 3.164482E-24;
+ m[9010221] = 9.403031E-24;
+ m[9010443] = 8.438618E-24;
+ m[9010553] = 8.3318E-24;
+ m[9020221] = 8.093281E-23;
+ m[9020443] = 1.061633E-23;
+ m[9030221] = 6.038644E-24;
+ m[9042413] = 2.07634E-21;
+ m[9050225] = 1.394517E-24;
+ m[9060225] = 3.291061E-24;
+ m[9080225] = 4.388081E-24;
+ m[9090225] = 2.056913E-24;
+ m[9910445] = 2.07634E-21;
+ m[9920443] = 2.07634E-21;
return true;
}
/// @name Histograms
//@{
Histo1DPtr _h_K0s_pt_y_30; // histogram for 2.5 < y < 3.0 (d2sigma)
Histo1DPtr _h_K0s_pt_y_35; // histogram for 3.0 < y < 3.5 (d2sigma)
Histo1DPtr _h_K0s_pt_y_40; // histogram for 3.5 < y < 4.0 (d2sigma)
Histo1DPtr _h_K0s_pt_30; // histogram for 2.5 < y < 3.0 (sigma)
Histo1DPtr _h_K0s_pt_35; // histogram for 3.0 < y < 3.5 (sigma)
Histo1DPtr _h_K0s_pt_40; // histogram for 3.5 < y < 4.0 (sigma)
Histo1DPtr _h_K0s_pt_y_all; // histogram for 2.5 < y < 4.0 (d2sigma)
- double sumKs0_30; // Sum of weights 2.5 < y < 3.0
- double sumKs0_35; // Sum of weights 3.0 < y < 3.5
- double sumKs0_40; // Sum of weights 3.5 < y < 4.0
+ CounterPtr sumKs0_30; // Sum of weights 2.5 < y < 3.0
+ CounterPtr sumKs0_35; // Sum of weights 3.0 < y < 3.5
+ CounterPtr sumKs0_40; // Sum of weights 3.5 < y < 4.0
// Various counters mainly for debugging and comparisons between different generators
size_t sumKs0_badnull; // Nb of particles for which mother could not be identified
size_t sumKs0_badlft; // Nb of mesons with long lived mothers
size_t sumKs0_all; // Nb of all Ks0 generated
size_t sumKs0_outup; // Nb of mesons with y > 4.0
size_t sumKs0_outdwn; // Nb of mesons with y < 2.5
size_t sum_low_pt_loss; // Nb of mesons with very low pT (indicates when units are mixed-up)
size_t sum_high_pt_loss; // Nb of mesons with pT > 1.6 GeV/c
// Map between PDG id and particle lifetimes in seconds
std::map<int, double> partLftMap;
// Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable)
- static const int stablePDGIds[205];
+ static const array<int,171> stablePDGIds;
//@}
};
// Actual initialization according to ISO C++ requirements
- const int LHCB_2010_S8758301::stablePDGIds[205] = {
+ const array<int,171> LHCB_2010_S8758301::stablePDGIds{{
311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303,
4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414,
4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324,
5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534,
5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112,
12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343,
30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321,
100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555,
120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002,
1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015,
1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039,
2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013,
2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223,
3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001,
4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023,
- 9900024, 9900041, 9900042};
+ 9900024, 9900041, 9900042}};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2010_S8758301);
}
diff --git a/analyses/pluginLHCb/LHCB_2011_I917009.cc b/analyses/pluginLHCb/LHCB_2011_I917009.cc
--- a/analyses/pluginLHCb/LHCB_2011_I917009.cc
+++ b/analyses/pluginLHCb/LHCB_2011_I917009.cc
@@ -1,323 +1,333 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class LHCB_2011_I917009 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2011_I917009()
: Analysis("LHCB_2011_I917009"),
rap_beam(0.0), pt_min(0.0),
pt1_edge(0.65), pt2_edge(1.0),
pt3_edge(2.5), rap_min(2.),
rap_max(0.0), dsShift(0)
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
int y_nbins = 4;
fillMap(partLftMap);
if (fuzzyEquals(sqrtS(), 0.9*TeV)) {
rap_beam = 6.87;
rap_max = 4.;
pt_min = 0.25;
} else if (fuzzyEquals(sqrtS(), 7*TeV)) {
rap_beam = 8.92;
rap_max = 4.5;
pt_min = 0.15;
y_nbins = 5;
dsShift = 8;
} else {
MSG_ERROR("Incompatible beam energy!");
}
// Create the sets of temporary histograms that will be used to make the ratios in the finalize()
- for (size_t i = 0; i < 12; ++i) _tmphistos[i] = YODA::Histo1D(y_nbins, rap_min, rap_max);
- for (size_t i = 12; i < 15; ++i) _tmphistos[i] = YODA::Histo1D(refData(dsShift+5, 1, 1));
- for (size_t i = 15; i < 18; ++i) _tmphistos[i] = YODA::Histo1D(y_nbins, rap_beam - rap_max, rap_beam - rap_min);
+ for (size_t i = 0; i < 12; ++i) book(_tmphistos[i], "TMP/"+to_str(i), y_nbins, rap_min, rap_max);
+ for (size_t i = 12; i < 15; ++i) book(_tmphistos[i], "TMP/"+to_str(i), refData(dsShift+5, 1, 1));
+ for (size_t i = 15; i < 18; ++i) book(_tmphistos[i], "TMP/"+to_str(i), y_nbins, rap_beam - rap_max, rap_beam - rap_min);
+
+ int dsId = dsShift + 1;
+ for (size_t j = 0; j < 3; ++j) {
+ book(s1, dsId, 1, j+1);
+ book(s2, dsId+1, 1, j+1);
+ }
+ dsId += 2;
+ for (size_t j = 3; j < 6; ++j) {
+ book(s3, dsId, 1, 1);
+ dsId += 1;
+ book(s4, dsId, 1, 1);
+ dsId += 1;
+ }
declare(UnstableFinalState(), "UFS");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
double ancestor_lftsum = 0.0;
double y, pT;
int id;
int partIdx = -1;
foreach (const Particle& p, ufs.particles()) {
id = p.pid();
// continue if particle not a K0s nor (anti-)Lambda
if ( (id == 310) || (id == -310) ) {
partIdx = 2;
} else if ( id == 3122 ) {
partIdx = 1;
} else if ( id == -3122 ) {
partIdx = 0;
} else {
continue;
}
ancestor_lftsum = getMotherLifeTimeSum(p);
// Lifetime cut: ctau sum of all particle ancestors < 10^-9 m according to the paper (see eq. 5)
const double MAX_CTAU = 1.0E-9; // [m]
if ( (ancestor_lftsum < 0.0) || (ancestor_lftsum > MAX_CTAU) ) continue;
const FourMomentum& qmom = p.momentum();
y = log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz()))/2.;
// skip this particle if it has too high or too low rapidity (extremely rare cases when E = +- pz)
if ( std::isnan(y) || std::isinf(y) ) continue;
y = fabs(y);
if (!inRange(y, rap_min, rap_max)) continue;
pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py()));
if (!inRange(pT, pt_min, pt3_edge)) continue;
// Filling corresponding temporary histograms for pT intervals
- if (inRange(pT, pt_min, pt1_edge)) _tmphistos[partIdx*3].fill(y, weight);
- if (inRange(pT, pt1_edge, pt2_edge)) _tmphistos[partIdx*3+1].fill(y, weight);
- if (inRange(pT, pt2_edge, pt3_edge)) _tmphistos[partIdx*3+2].fill(y, weight);
+ if (inRange(pT, pt_min, pt1_edge)) _tmphistos[partIdx*3]->fill(y);
+ if (inRange(pT, pt1_edge, pt2_edge)) _tmphistos[partIdx*3+1]->fill(y);
+ if (inRange(pT, pt2_edge, pt3_edge)) _tmphistos[partIdx*3+2]->fill(y);
// Fill histo in rapidity for whole pT interval
- _tmphistos[partIdx+9].fill(y, weight);
+ _tmphistos[partIdx+9]->fill(y);
// Fill histo in pT for whole rapidity interval
- _tmphistos[partIdx+12].fill(pT, weight);
+ _tmphistos[partIdx+12]->fill(pT);
// Fill histo in rapidity loss for whole pT interval
- _tmphistos[partIdx+15].fill(rap_beam - y, weight);
+ _tmphistos[partIdx+15]->fill(rap_beam - y);
}
}
// Generate the ratio histograms
void finalize() {
int dsId = dsShift + 1;
for (size_t j = 0; j < 3; ++j) {
/// @todo Compactify to two one-liners
- Scatter2DPtr s1; book(s1, dsId, 1, j+1);
divide(_tmphistos[j], _tmphistos[3+j], s1);
- Scatter2DPtr s2; book(s2, dsId+1, 1, j+1);
divide(_tmphistos[j], _tmphistos[6+j], s2);
}
dsId += 2;
for (size_t j = 3; j < 6; ++j) {
/// @todo Compactify to two one-liners
- Scatter2DPtr s1; book(s1, dsId, 1, 1);
- divide(_tmphistos[3*j], _tmphistos[3*j+1], s1);
+ divide(_tmphistos[3*j], _tmphistos[3*j+1], s3);
dsId += 1;
- Scatter2DPtr s2; book(s2, dsId, 1, 1);
- divide(_tmphistos[3*j], _tmphistos[3*j+2], s2);
+ divide(_tmphistos[3*j], _tmphistos[3*j+2], s4);
dsId += 1;
}
}
//@}
private:
// Get particle lifetime from hardcoded data
double getLifeTime(int pid) {
double lft = -1.0;
if (pid < 0) pid = - pid;
// Correct Pythia6 PIDs for f0(980), f0(1370) mesons
if (pid == 10331) pid = 30221;
if (pid == 10221) pid = 9010221;
map<int, double>::iterator pPartLft = partLftMap.find(pid);
// search stable particle list
if (pPartLft == partLftMap.end()) {
if (pid <= 100) return 0.0;
for (size_t i=0; i < sizeof(stablePDGIds)/sizeof(unsigned int); i++) {
if (pid == stablePDGIds[i]) { lft = 0.0; break; }
}
} else {
lft = (*pPartLft).second;
}
if (lft < 0.0 && PID::isHadron(pid)) {
MSG_ERROR("Could not determine lifetime for particle with PID " << pid
<< "... This V^0 will be considered unprompt!");
}
return lft;
}
// Data members like post-cuts event weight counters go here
const double getMotherLifeTimeSum(const Particle& p) {
if (p.genParticle() == NULL) return -1.;
double lftSum = 0.;
double plft = 0.;
const GenParticle* part = p.genParticle();
const GenVertex* ivtx = part->production_vertex();
while (ivtx) {
if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; };
const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin();
part = (*iPart_invtx);
if ( !(part) ) { lftSum = -1.; break; };
ivtx = part->production_vertex();
if ( (part->pdg_id() == 2212) || !(ivtx) ) break; //reached beam
plft = getLifeTime(part->pdg_id());
if (plft < 0.) { lftSum = -1.; break; };
lftSum += plft;
};
return (lftSum * c_light);
}
/// @name Private variables
//@{
// The rapidity of the beam according to the selected beam energy
double rap_beam;
// The edges of the intervals of transverse momentum
double pt_min, pt1_edge, pt2_edge, pt3_edge;
// The limits of the rapidity window
double rap_min;
double rap_max;
// Indicates which set of histograms will be output to yoda file (according to beam energy)
int dsShift;
// Map between PDG id and particle lifetimes in seconds
std::map<int, double> partLftMap;
// Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable)
static const int stablePDGIds[205];
//@}
/// @name Helper histograms
//@{
/// Histograms are defined in the following order: anti-Lambda, Lambda and K0s.
/// First 3 suites of 3 histograms correspond to each particle in bins of y for the 3 pT intervals. (9 histos)
/// Next 3 histograms contain the particles in y bins for the whole pT interval (3 histos)
/// Next 3 histograms contain the particles in y_loss bins for the whole pT interval (3 histos)
/// Last 3 histograms contain the particles in pT bins for the whole rapidity (y) interval (3 histos)
- YODA::Histo1D _tmphistos[18];
+ Histo1DPtr _tmphistos[18];
+ Scatter2DPtr s1,s2,s3,s4;
//@}
// Fill the PDG Id to Lifetime[seconds] map
// Data was extracted from LHCb Particle Table through LHCb::ParticlePropertySvc
bool fillMap(map<int, double>& m) {
m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16;
m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16;
m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26;
m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12;
m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24;
m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08;
m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24;
m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 3.91793E-24;
m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24;
m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23;
m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21;
m[333] = 1.545099E-22; m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12;
m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13;
m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19;
m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22;
m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23;
m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12;
m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13;
m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24;
m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24;
m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24;
m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24;
m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24;
m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24;
m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24;
m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24;
m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24;
m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24;
m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10;
m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23;
m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22;
m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14;
m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19;
m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19;
m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19;
m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12;
m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12;
m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24;
m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24;
m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24;
m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24;
m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24;
m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23;
m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23;
m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24;
m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24;
m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24;
m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24;
m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23;
m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24;
m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24;
m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13314] = 2.742551E-23;
m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24;
m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24;
m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24;
m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22;
m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24;
m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24;
m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24;
m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24;
m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24;
m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24;
m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24;
m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24;
m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24;
m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24;
m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24;
m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24;
m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24;
m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24;
m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24;
m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24;
m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24;
m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24;
m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20;
m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24;
m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24;
m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24;
m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; m[9020443] = 1.061633E-23;
m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24;
m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24;
m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21;
return true;
}
};
const int LHCB_2011_I917009::stablePDGIds[205] = {
311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303,
4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414,
4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324,
5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534,
5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112,
12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343,
30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321,
100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555,
120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002,
1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015,
1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039,
2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013,
2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223,
3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001,
4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023,
9900024, 9900041, 9900042 };
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2011_I917009);
}
+
diff --git a/analyses/pluginLHCb/LHCB_2012_I1119400.cc b/analyses/pluginLHCb/LHCB_2012_I1119400.cc
--- a/analyses/pluginLHCb/LHCB_2012_I1119400.cc
+++ b/analyses/pluginLHCb/LHCB_2012_I1119400.cc
@@ -1,357 +1,356 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class LHCB_2012_I1119400 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2012_I1119400() : Analysis("LHCB_2012_I1119400"),
_p_min(5.0),
_pt_min(0.0),_pt1_edge(0.8), _pt2_edge(1.2),
//_eta_nbins(4),
_eta_min(2.5),
_eta_max(4.5)
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
fillMap(_partLftMap);
int id_shift = 0;
if (fuzzyEquals(sqrtS(), 7*TeV)) id_shift = 1;
// define ratios if second pdgid in pair is -1, it means that is a antiparticle/particle ratio
_ratiotype["pbarp"] = make_pair(2212, -1);
_ratiotype["kminuskplus"] = make_pair(321, -1);
_ratiotype["piminuspiplus"] = make_pair(211, -1);
_ratiotype["ppi"] = make_pair(2212, 211);
_ratiotype["kpi"] = make_pair(321, 211);
_ratiotype["pk"] = make_pair(2212, 321);
std::map<string, int > _hepdataid;
_hepdataid["pbarp"] = 1 + id_shift;
_hepdataid["kminuskplus"] = 3 + id_shift;
_hepdataid["piminuspiplus"] = 5 + id_shift;
_hepdataid["ppi"] = 7 + id_shift;
_hepdataid["kpi"] = 9 + id_shift;
_hepdataid["pk"] = 11 + id_shift;
std::map<std::string, std::pair<int, int> >::iterator it;
// booking histograms
for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) {
book(_h_ratio_lowpt [it->first], _hepdataid[it->first], 1, 1);
book(_h_ratio_midpt [it->first], _hepdataid[it->first], 1, 2);
book(_h_ratio_highpt[it->first], _hepdataid[it->first], 1, 3);
book(_h_num_lowpt [it->first], "TMP/num_l_"+it->first,refData(_hepdataid[it->first], 1, 1));
book(_h_num_midpt [it->first], "TMP/num_m_"+it->first,refData(_hepdataid[it->first], 1, 2));
book(_h_num_highpt [it->first], "TMP/num_h_"+it->first,refData(_hepdataid[it->first], 1, 3));
book(_h_den_lowpt [it->first], "TMP/den_l_"+it->first,refData(_hepdataid[it->first], 1, 1));
book(_h_den_midpt [it->first], "TMP/den_m_"+it->first,refData(_hepdataid[it->first], 1, 2));
book(_h_den_highpt [it->first], "TMP/den_h_"+it->first,refData(_hepdataid[it->first], 1, 3));
}
declare(ChargedFinalState(_eta_min, _eta_max, _pt_min*GeV), "CFS");
}
// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
int id = p.pid();
// continue if particle not a proton, a kaon or a pion
if ( !( (abs(id) == 211) || (abs(id) == 321) || (abs(id) == 2212))) {
continue;
}
// cut in momentum
const FourMomentum& qmom = p.momentum();
if (qmom.p3().mod() < _p_min) continue;
// Lifetime cut: ctau sum of all particle ancestors < 10^-9 m according to the paper (see eq. 5)
const double MAX_CTAU = 1.0e-9; // [m]
double ancestor_lftsum = getMotherLifeTimeSum(p);
if ( (ancestor_lftsum < 0.0) || (ancestor_lftsum > MAX_CTAU) ) continue;
double eta = qmom.eta();
double pT = qmom.pT();
std::map<std::string, std::pair<int, int> >::iterator it;
for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) {
// check what type of ratio is
if ((it->second.second)==-1) {
// check ptbin
if (pT < _pt1_edge) {
// filling histos for numerator and denominator
- if (id == -abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta, weight);
- if (id == abs(it->second.first)) _h_den_lowpt[it->first]->fill(eta, weight);
+ if (id == -abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta);
+ if (id == abs(it->second.first)) _h_den_lowpt[it->first]->fill(eta);
}
else if (pT < _pt2_edge) {
// filling histos for numerator and denominator
- if (id == -abs(it->second.first)) _h_num_midpt[it->first]->fill(eta, weight);
- if (id == abs(it->second.first)) _h_den_midpt[it->first]->fill(eta, weight);
+ if (id == -abs(it->second.first)) _h_num_midpt[it->first]->fill(eta);
+ if (id == abs(it->second.first)) _h_den_midpt[it->first]->fill(eta);
}
else {
// filling histos for numerator and denominator
- if (id == -abs(it->second.first)) _h_num_highpt[it->first]->fill(eta, weight);
- if (id == abs(it->second.first)) _h_den_highpt[it->first]->fill(eta, weight);
+ if (id == -abs(it->second.first)) _h_num_highpt[it->first]->fill(eta);
+ if (id == abs(it->second.first)) _h_den_highpt[it->first]->fill(eta);
}
}
else {
// check what type of ratio is
if (pT < _pt1_edge) {
// filling histos for numerator and denominator
- if (abs(id) == abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta, weight);
- if (abs(id) == abs(it->second.second)) _h_den_lowpt[it->first]->fill(eta, weight);
+ if (abs(id) == abs(it->second.first)) _h_num_lowpt[it->first]->fill(eta);
+ if (abs(id) == abs(it->second.second)) _h_den_lowpt[it->first]->fill(eta);
}
else if (pT < _pt2_edge) {
// filling histos for numerator and denominator
- if (abs(id) == abs(it->second.first)) _h_num_midpt[it->first]->fill(eta, weight);
- if (abs(id) == abs(it->second.second)) _h_den_midpt[it->first]->fill(eta, weight);
+ if (abs(id) == abs(it->second.first)) _h_num_midpt[it->first]->fill(eta);
+ if (abs(id) == abs(it->second.second)) _h_den_midpt[it->first]->fill(eta);
}
else {
// filling histos for numerator and denominator
- if (abs(id) == abs(it->second.first)) _h_num_highpt[it->first]->fill(eta, weight);
- if (abs(id) == abs(it->second.second)) _h_den_highpt[it->first]->fill(eta, weight);
+ if (abs(id) == abs(it->second.first)) _h_num_highpt[it->first]->fill(eta);
+ if (abs(id) == abs(it->second.second)) _h_den_highpt[it->first]->fill(eta);
}
}
}
}
}
// Generate the ratio histograms
void finalize() {
std::map<std::string, std::pair<int, int> >::iterator it;
// booking histograms
for (it=_ratiotype.begin(); it!=_ratiotype.end(); it++) {
divide(_h_num_lowpt[it->first], _h_den_lowpt[it->first], _h_ratio_lowpt[it->first]);
divide(_h_num_midpt[it->first], _h_den_midpt[it->first], _h_ratio_midpt[it->first]);
divide(_h_num_highpt[it->first], _h_den_highpt[it->first], _h_ratio_highpt[it->first]);
}
}
//@}
private:
// Get particle lifetime from hardcoded data
double getLifeTime(int pid) {
pid = abs(pid);
double lft = -1.0;
map<int, double>::iterator pPartLft = _partLftMap.find(pid);
// search stable particle list
if (pPartLft == _partLftMap.end()) {
if (pid <= 100) return 0.0;
for (size_t i=0; i < sizeof(_stablePDGIds)/sizeof(unsigned int); i++) {
if (pid == _stablePDGIds[i]) {
lft = 0.0;
break;
}
}
} else {
lft = (*pPartLft).second;
}
if (lft < 0.0 && PID::isHadron(pid)) {
MSG_WARNING("Lifetime map imcomplete --- " << pid
<< "... assume zero lifetime");
lft = 0.0;
}
return lft;
}
// Data members like post-cuts event weight counters go here
const double getMotherLifeTimeSum(const Particle& p) {
if (p.genParticle() == NULL) return -1.;
double lftSum = 0.;
double plft = 0.;
const GenParticle* part = p.genParticle();
const GenVertex* ivtx = part->production_vertex();
while(ivtx)
{
if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; };
const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin();
part = (*iPart_invtx);
if ( !(part) ) { lftSum = -1.; break; };
ivtx = part->production_vertex();
if ( (part->pdg_id() == 2212) || !(ivtx) ) break; // reached beam
plft = getLifeTime(part->pdg_id());
if (plft < 0.) { lftSum = -1.; break; };
lftSum += plft;
};
return (lftSum * c_light);
}
/// @name Private variables
// Momentum threshold
double _p_min;
// The edges of the intervals of transversal momentum
double _pt_min;
double _pt1_edge;
double _pt2_edge;
// The limits of the pseudorapidity window
//int _eta_nbins;
double _eta_min;
double _eta_max;
// Map between PDG id and particle lifetimes in seconds
std::map<int, double> _partLftMap;
// Set of PDG Ids for stable particles (PDG Id <= 100 are considered stable)
static const int _stablePDGIds[205];
// Define histograms
// ratio
std::map<std::string, Scatter2DPtr > _h_ratio_lowpt;
std::map<std::string, Scatter2DPtr > _h_ratio_midpt;
std::map<std::string, Scatter2DPtr > _h_ratio_highpt;
// numerator
std::map<std::string, Histo1DPtr > _h_num_lowpt;
std::map<std::string, Histo1DPtr > _h_num_midpt;
std::map<std::string, Histo1DPtr > _h_num_highpt;
// denominator
std::map<std::string, Histo1DPtr > _h_den_lowpt;
std::map<std::string, Histo1DPtr > _h_den_midpt;
std::map<std::string, Histo1DPtr > _h_den_highpt;
// Map of ratios and IDs of numerator and denominator
std::map<string, pair<int,int> > _ratiotype;
// Fill the PDG Id to Lifetime[seconds] map
// Data was extracted from LHCb Particle Table through LHCb::ParticlePropertySvc
bool fillMap(map<int, double> &m) {
m[6] = 4.707703E-25; m[11] = 1.E+16; m[12] = 1.E+16;
m[13] = 2.197019E-06; m[14] = 1.E+16; m[15] = 2.906E-13; m[16] = 1.E+16; m[22] = 1.E+16;
m[23] = 2.637914E-25; m[24] = 3.075758E-25; m[25] = 9.4E-26; m[35] = 9.4E-26;
m[36] = 9.4E-26; m[37] = 9.4E-26; m[84] = 3.335641E-13; m[85] = 1.290893E-12;
m[111] = 8.4E-17; m[113] = 4.405704E-24; m[115] = 6.151516E-24; m[117] = 4.088275E-24;
m[119] = 2.102914E-24; m[130] = 5.116E-08; m[150] = 1.525E-12; m[211] = 2.6033E-08;
m[213] = 4.405704E-24; m[215] = 6.151516E-24; m[217] = 4.088275E-24; m[219] = 2.102914E-24;
m[221] = 5.063171E-19; m[223] = 7.752794E-23; m[225] = 3.555982E-24; m[227] = 3.91793E-24;
m[229] = 2.777267E-24; m[310] = 8.953E-11; m[313] = 1.308573E-23; m[315] = 6.038644E-24;
m[317] = 4.139699E-24; m[319] = 3.324304E-24; m[321] = 1.238E-08; m[323] = 1.295693E-23;
m[325] = 6.682357E-24; m[327] = 4.139699E-24; m[329] = 3.324304E-24; m[331] = 3.210791E-21;
m[333] = 1.545099E-22; m[335] = 9.016605E-24; m[337] = 7.565657E-24; m[350] = 1.407125E-12;
m[411] = 1.04E-12; m[413] = 6.856377E-21; m[415] = 1.778952E-23; m[421] = 4.101E-13;
m[423] = 1.000003E-19; m[425] = 1.530726E-23; m[431] = 5.E-13; m[433] = 1.000003E-19;
m[435] = 3.291061E-23; m[441] = 2.465214E-23; m[443] = 7.062363E-21; m[445] = 3.242425E-22;
m[510] = 1.525E-12; m[511] = 1.525E-12; m[513] = 1.000019E-19; m[515] = 1.31E-23;
m[521] = 1.638E-12; m[523] = 1.000019E-19; m[525] = 1.31E-23; m[530] = 1.536875E-12;
m[531] = 1.472E-12; m[533] = 1.E-19; m[535] = 1.31E-23; m[541] = 4.5E-13;
m[553] = 1.218911E-20; m[1112] = 4.539394E-24; m[1114] = 5.578069E-24; m[1116] = 1.994582E-24;
m[1118] = 2.269697E-24; m[1212] = 4.539394E-24; m[1214] = 5.723584E-24; m[1216] = 1.994582E-24;
m[1218] = 1.316424E-24; m[2112] = 8.857E+02; m[2114] = 5.578069E-24; m[2116] = 4.388081E-24;
m[2118] = 2.269697E-24; m[2122] = 4.539394E-24; m[2124] = 5.723584E-24; m[2126] = 1.994582E-24;
m[2128] = 1.316424E-24; m[2212] = 1.E+16; m[2214] = 5.578069E-24; m[2216] = 4.388081E-24;
m[2218] = 2.269697E-24; m[2222] = 4.539394E-24; m[2224] = 5.578069E-24; m[2226] = 1.994582E-24;
m[2228] = 2.269697E-24; m[3112] = 1.479E-10; m[3114] = 1.670589E-23; m[3116] = 5.485102E-24;
m[3118] = 3.656734E-24; m[3122] = 2.631E-10; m[3124] = 4.219309E-23; m[3126] = 8.227653E-24;
m[3128] = 3.291061E-24; m[3212] = 7.4E-20; m[3214] = 1.828367E-23; m[3216] = 5.485102E-24;
m[3218] = 3.656734E-24; m[3222] = 8.018E-11; m[3224] = 1.838582E-23; m[3226] = 5.485102E-24;
m[3228] = 3.656734E-24; m[3312] = 1.639E-10; m[3314] = 6.648608E-23; m[3322] = 2.9E-10;
m[3324] = 7.233101E-23; m[3334] = 8.21E-11; m[4112] = 2.991874E-22; m[4114] = 4.088274E-23;
m[4122] = 2.E-13; m[4132] = 1.12E-13; m[4212] = 3.999999E-22; m[4214] = 3.291061E-22;
m[4222] = 2.951624E-22; m[4224] = 4.417531E-23; m[4232] = 4.42E-13; m[4332] = 6.9E-14;
m[4412] = 3.335641E-13; m[4422] = 3.335641E-13; m[4432] = 3.335641E-13; m[5112] = 1.E-19;
m[5122] = 1.38E-12; m[5132] = 1.42E-12; m[5142] = 1.290893E-12; m[5212] = 1.E-19;
m[5222] = 1.E-19; m[5232] = 1.42E-12; m[5242] = 1.290893E-12; m[5312] = 1.E-19;
m[5322] = 1.E-19; m[5332] = 1.55E-12; m[5342] = 1.290893E-12; m[5442] = 1.290893E-12;
m[5512] = 1.290893E-12; m[5522] = 1.290893E-12; m[5532] = 1.290893E-12; m[5542] = 1.290893E-12;
m[10111] = 2.48382E-24; m[10113] = 4.635297E-24; m[10115] = 2.54136E-24; m[10211] = 2.48382E-24;
m[10213] = 4.635297E-24; m[10215] = 2.54136E-24; m[10223] = 1.828367E-24; m[10225] = 3.636531E-24;
m[10311] = 2.437823E-24; m[10313] = 7.313469E-24; m[10315] = 3.538775E-24;
m[10321] = 2.437823E-24; m[10323] = 7.313469E-24; m[10325] = 3.538775E-24;
m[10331] = 4.804469E-24; m[10411] = 4.38E-24; m[10413] = 3.29E-23; m[10421] = 4.38E-24;
m[10423] = 3.22653E-23; m[10431] = 6.5821E-22; m[10433] = 6.5821E-22; m[10441] = 6.453061E-23;
m[10511] = 4.39E-24; m[10513] = 1.65E-23; m[10521] = 4.39E-24; m[10523] = 1.65E-23;
m[10531] = 4.39E-24; m[10533] = 1.65E-23; m[11114] = 2.194041E-24; m[11116] = 1.828367E-24;
m[11212] = 1.880606E-24; m[11216] = 1.828367E-24; m[12112] = 2.194041E-24;
m[12114] = 2.194041E-24; m[12116] = 5.063171E-24; m[12126] = 1.828367E-24;
m[12212] = 2.194041E-24; m[12214] = 2.194041E-24; m[12216] = 5.063171E-24;
m[12224] = 2.194041E-24; m[12226] = 1.828367E-24; m[13112] = 6.582122E-24; m[13114] = 1.09702E-23;
m[13116] = 5.485102E-24; m[13122] = 1.316424E-23; m[13124] = 1.09702E-23; m[13126] = 6.928549E-24;
m[13212] = 6.582122E-24; m[13214] = 1.09702E-23; m[13216] = 5.485102E-24; m[13222] = 6.582122E-24;
m[13224] = 1.09702E-23; m[13226] = 5.485102E-24; m[13314] = 2.742551E-23;
m[13324] = 2.742551E-23; m[14122] = 1.828367E-22; m[20022] = 1.E+16; m[20113] = 1.567172E-24;
m[20213] = 1.567172E-24; m[20223] = 2.708692E-23; m[20313] = 3.782829E-24;
m[20315] = 2.384827E-24; m[20323] = 3.782829E-24; m[20325] = 2.384827E-24;
m[20333] = 1.198929E-23; m[20413] = 2.63E-24; m[20423] = 2.63E-24; m[20433] = 6.5821E-22;
m[20443] = 7.395643E-22; m[20513] = 2.63E-24; m[20523] = 2.63E-24; m[20533] = 2.63E-24;
m[21112] = 2.632849E-24; m[21114] = 3.291061E-24; m[21212] = 2.632849E-24;
m[21214] = 6.582122E-24; m[22112] = 4.388081E-24; m[22114] = 3.291061E-24;
m[22122] = 2.632849E-24; m[22124] = 6.582122E-24; m[22212] = 4.388081E-24;
m[22214] = 3.291061E-24; m[22222] = 2.632849E-24; m[22224] = 3.291061E-24;
m[23112] = 7.313469E-24; m[23114] = 2.991874E-24; m[23122] = 4.388081E-24;
m[23124] = 6.582122E-24; m[23126] = 3.291061E-24; m[23212] = 7.313469E-24;
m[23214] = 2.991874E-24; m[23222] = 7.313469E-24; m[23224] = 2.991874E-24;
m[30113] = 2.632849E-24; m[30213] = 2.632849E-24; m[30221] = 1.880606E-24;
m[30223] = 2.089563E-24; m[30313] = 2.056913E-24; m[30323] = 2.056913E-24;
m[30443] = 2.419898E-23; m[31114] = 1.880606E-24; m[31214] = 3.291061E-24;
m[32112] = 3.989164E-24; m[32114] = 1.880606E-24; m[32124] = 3.291061E-24;
m[32212] = 3.989164E-24; m[32214] = 1.880606E-24; m[32224] = 1.880606E-24;
m[33122] = 1.880606E-23; m[42112] = 6.582122E-24; m[42212] = 6.582122E-24;
m[43122] = 2.194041E-24; m[53122] = 4.388081E-24; m[100111] = 1.645531E-24;
m[100113] = 1.64553E-24; m[100211] = 1.645531E-24; m[100213] = 1.64553E-24;
m[100221] = 1.196749E-23; m[100223] = 3.061452E-24; m[100313] = 2.837122E-24;
m[100323] = 2.837122E-24; m[100331] = 4.459432E-25; m[100333] = 4.388081E-24;
m[100441] = 4.701516E-23; m[100443] = 2.076379E-21; m[100553] = 2.056913E-20;
m[200553] = 3.242425E-20; m[300553] = 3.210791E-23; m[9000111] = 8.776163E-24;
m[9000211] = 8.776163E-24; m[9000443] = 8.227652E-24; m[9000553] = 5.983747E-24;
m[9010111] = 3.164482E-24; m[9010211] = 3.164482E-24; m[9010221] = 9.403031E-24;
m[9010443] = 8.438618E-24; m[9010553] = 8.3318E-24; m[9020443] = 1.061633E-23;
m[9030221] = 6.038644E-24; m[9042413] = 2.07634E-21; m[9050225] = 1.394517E-24;
m[9060225] = 3.291061E-24; m[9080225] = 4.388081E-24; m[9090225] = 2.056913E-24;
m[9910445] = 2.07634E-21; m[9920443] = 2.07634E-21;
return true;
}
};
const int LHCB_2012_I1119400::_stablePDGIds[205] = {
311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303,
4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414,
4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324,
5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534,
5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112,
12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343,
30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321,
100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555,
120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002,
1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015,
1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039,
2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013,
2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223,
3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001,
4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023,
9900024, 9900041, 9900042 };
// Plugin hook
DECLARE_RIVET_PLUGIN(LHCB_2012_I1119400);
}
diff --git a/analyses/pluginLHCb/LHCB_2012_I1208102.cc b/analyses/pluginLHCb/LHCB_2012_I1208102.cc.segfault
rename from analyses/pluginLHCb/LHCB_2012_I1208102.cc
rename to analyses/pluginLHCb/LHCB_2012_I1208102.cc.segfault
--- a/analyses/pluginLHCb/LHCB_2012_I1208102.cc
+++ b/analyses/pluginLHCb/LHCB_2012_I1208102.cc.segfault
@@ -1,79 +1,82 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// Differential cross-sections of $\mathrm{Z}/\gamma^* \to e^{+}e^{-}$ vs rapidity and $\phi^*$
class LHCB_2012_I1208102 : public Analysis {
public:
/// Constructor
LHCB_2012_I1208102()
: Analysis("LHCB_2012_I1208102")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
ZFinder zeefinder(FinalState(), Cuts::etaIn(2.0, 4.5) && Cuts::pT > 20*GeV, PID::ELECTRON, 60*GeV, 120*GeV);
declare(zeefinder, "ZeeFinder");
book(_h_sigma_vs_y ,2, 1, 1);
book(_h_sigma_vs_phi ,3, 1, 1);
}
/// Do the analysis
void analyze(const Event& e) {
const ZFinder& zeefinder = apply<ZFinder>(e, "ZeeFinder");
if (zeefinder.empty()) vetoEvent;
if (zeefinder.bosons().size() > 1)
MSG_WARNING("Found multiple (" << zeefinder.bosons().size() << ") Z -> e+ e- decays!");
// Z momenta
const FourMomentum& zee = zeefinder.bosons()[0].momentum();
+
+ if (zeefinder.constituents().size() < 2) vetoEvent;
+
const Particle& pozitron = zeefinder.constituents()[0];
const Particle& electron = zeefinder.constituents()[1];
// Calculation of the angular variable
const double diffphi = deltaPhi(pozitron, electron);
const double diffpsd = deltaEta(pozitron, electron);
const double accphi = M_PI - diffphi;
const double angular = tan(accphi/2) / cosh(diffpsd/2);
// Fill histograms
_h_sigma_vs_y->fill(zee.rapidity());
_h_sigma_vs_phi->fill(angular);
}
/// Finalize
void finalize() {
const double xs = crossSection()/picobarn;
scale(_h_sigma_vs_y, xs/sumOfWeights());
scale(_h_sigma_vs_phi, xs/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_sigma_vs_y, _h_sigma_vs_phi;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2012_I1208102);
}
diff --git a/analyses/pluginLHCb/LHCB_2013_I1208105.cc b/analyses/pluginLHCb/LHCB_2013_I1208105.cc
--- a/analyses/pluginLHCb/LHCB_2013_I1208105.cc
+++ b/analyses/pluginLHCb/LHCB_2013_I1208105.cc
@@ -1,231 +1,235 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class LHCB_2013_I1208105 : public Analysis {
public:
LHCB_2013_I1208105()
: Analysis("LHCB_2013_I1208105")
{ }
void init() {
// Projections
declare(FinalState(1.9, 4.9), "forwardFS");
declare(FinalState(-3.5,-1.5), "backwardFS");
declare(ChargedFinalState(1.9, 4.9), "forwardCFS");
declare(ChargedFinalState(-3.5,-1.5), "backwardCFS");
// Histos
book(_s_chEF_minbias, 1, 1, 1, true);
book(_s_chEF_hard, 2, 1, 1, true);
book(_s_chEF_diff, 3, 1, 1, true);
book(_s_chEF_nondiff, 4, 1, 1, true);
book(_s_totEF_minbias, 5, 1, 1, true);
book(_s_totEF_hard, 6, 1, 1, true);
book(_s_totEF_diff, 7, 1, 1, true);
book(_s_totEF_nondiff, 8, 1, 1, true);
// Temporary profiles and histos
/// @todo Convert to declared/registered temp histos
- _tp_chEF_minbias.reset(new YODA::Profile1D(refData(1,1,1)));
- _tp_chEF_hard.reset(new YODA::Profile1D(refData(2,1,1)));
- _tp_chEF_diff.reset(new YODA::Profile1D(refData(3,1,1)));
- _tp_chEF_nondiff.reset(new YODA::Profile1D(refData(4,1,1)));
- _tp_totEF_minbias.reset(new YODA::Profile1D(refData(5,1,1)));
- _tp_totEF_hard.reset(new YODA::Profile1D(refData(6,1,1)));
- _tp_totEF_diff.reset(new YODA::Profile1D(refData(7,1,1)));
- _tp_totEF_nondiff.reset(new YODA::Profile1D(refData(8,1,1)));
- //
- _th_chN_minbias.reset(new YODA::Histo1D(refData(1,1,1)));
- _th_chN_hard.reset(new YODA::Histo1D(refData(2,1,1)));
- _th_chN_diff.reset(new YODA::Histo1D(refData(3,1,1)));
- _th_chN_nondiff.reset(new YODA::Histo1D(refData(4,1,1)));
- _th_totN_minbias.reset(new YODA::Histo1D(refData(5,1,1)));
- _th_totN_hard.reset(new YODA::Histo1D(refData(6,1,1)));
- _th_totN_diff.reset(new YODA::Histo1D(refData(7,1,1)));
- _th_totN_nondiff.reset(new YODA::Histo1D(refData(8,1,1)));
+ book(_tp_chEF_minbias, "TMP/chEF_minbias", refData(1,1,1));
+ book(_tp_chEF_hard, "TMP/chEF_hard", refData(2,1,1));
+ book(_tp_chEF_diff, "TMP/chEF_diff", refData(3,1,1));
+ book(_tp_chEF_nondiff, "TMP/chEF_nondiff", refData(4,1,1));
+ book(_tp_totEF_minbias, "TMP/totEF_minbias", refData(5,1,1));
+ book(_tp_totEF_hard, "TMP/totEF_hard", refData(6,1,1));
+ book(_tp_totEF_diff, "TMP/totEF_diff", refData(7,1,1));
+ book(_tp_totEF_nondiff, "TMP/totEF_nondiff", refData(8,1,1));
+
+ book(_th_chN_minbias, "TMP/chN_minbias", refData(1,1,1));
+ book(_th_chN_hard, "TMP/chN_hard", refData(2,1,1));
+ book(_th_chN_diff, "TMP/chN_diff", refData(3,1,1));
+ book(_th_chN_nondiff, "TMP/chN_nondiff", refData(4,1,1));
+ book(_th_totN_minbias, "TMP/totN_minbias", refData(5,1,1));
+ book(_th_totN_hard, "TMP/totN_hard", refData(6,1,1));
+ book(_th_totN_diff, "TMP/totN_diff", refData(7,1,1));
+ book(_th_totN_nondiff, "TMP/totN_nondiff", refData(8,1,1));
// Counters
- _mbSumW = 0.0; _hdSumW = 0.0; _dfSumW = 0.0; _ndSumW = 0.0;
- _mbchSumW = 0.0; _hdchSumW = 0.0; _dfchSumW = 0.0; _ndchSumW = 0.0;
+ book(_mbSumW, "TMP/mbSumW");
+ book(_hdSumW, "TMP/hdSumW");
+ book(_dfSumW, "TMP/dfSumW");
+ book(_ndSumW, "TMP/ndSumW");
+ book(_mbchSumW, "TMP/mbchSumW");
+ book(_hdchSumW, "TMP/hdchSumW");
+ book(_dfchSumW, "TMP/dfchSumW");
+ book(_ndchSumW, "TMP/ndchSumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
const FinalState& ffs = apply<FinalState>(event, "forwardFS");
const FinalState& bfs = apply<FinalState>(event, "backwardFS");
const ChargedFinalState& fcfs = apply<ChargedFinalState>(event, "forwardCFS");
const ChargedFinalState& bcfs = apply<ChargedFinalState>(event, "backwardCFS");
// Veto this event completely if there are no forward *charged* particles
if (fcfs.empty()) vetoEvent;
// Charged and neutral version
{
// Decide empirically if this is a "hard" or "diffractive" event
bool ishardEvt = false;
foreach (const Particle& p, ffs.particles()) {
if (p.pT() > 3.0*GeV) { ishardEvt = true; break; }
}
// Decide empirically if this is a "diffractive" event
/// @todo Can be "diffractive" *and* "hard"?
bool isdiffEvt = (bfs.size() == 0);
// Update event-type weight counters
- _mbSumW += weight;
- (isdiffEvt ? _dfSumW : _ndSumW) += weight;
- if (ishardEvt) _hdSumW += weight;
+ _mbSumW->fill();
+ (isdiffEvt ? _dfSumW : _ndSumW)->fill();
+ if (ishardEvt) _hdSumW->fill();
// Plot energy flow
foreach (const Particle& p, ffs.particles()) {
const double eta = p.eta();
const double energy = p.E();
- _tp_totEF_minbias->fill(eta, energy, weight);
- _th_totN_minbias->fill(eta, weight);
+ _tp_totEF_minbias->fill(eta, energy);
+ _th_totN_minbias->fill(eta);
if (ishardEvt) {
- _tp_totEF_hard->fill(eta, energy, weight);
- _th_totN_hard->fill(eta, weight);
+ _tp_totEF_hard->fill(eta, energy);
+ _th_totN_hard->fill(eta);
}
if (isdiffEvt) {
- _tp_totEF_diff->fill(eta, energy, weight);
- _th_totN_diff->fill(eta, weight);
+ _tp_totEF_diff->fill(eta, energy);
+ _th_totN_diff->fill(eta);
} else {
- _tp_totEF_nondiff->fill(eta, energy, weight);
- _th_totN_nondiff->fill(eta, weight);
+ _tp_totEF_nondiff->fill(eta, energy);
+ _th_totN_nondiff->fill(eta);
}
}
}
// Charged-only version
{
bool ishardEvt = false;
foreach (const Particle& p, fcfs.particles()) {
if (p.pT() > 3.0*GeV) { ishardEvt = true; break; }
}
// Decide empirically if this is a "diffractive" event
/// @todo Can be "diffractive" *and* "hard"?
bool isdiffEvt = (bcfs.size() == 0);
// Update event-type weight counters
- _mbchSumW += weight;
- (isdiffEvt ? _dfchSumW : _ndchSumW) += weight;
- if (ishardEvt) _hdchSumW += weight;
+ _mbchSumW->fill();
+ (isdiffEvt ? _dfchSumW : _ndchSumW)->fill();
+ if (ishardEvt) _hdchSumW->fill();
// Plot energy flow
foreach (const Particle& p, fcfs.particles()) {
const double eta = p.eta();
const double energy = p.E();
- _tp_chEF_minbias->fill(eta, energy, weight);
- _th_chN_minbias->fill(eta, weight);
+ _tp_chEF_minbias->fill(eta, energy);
+ _th_chN_minbias->fill(eta);
if (ishardEvt) {
- _tp_chEF_hard->fill(eta, energy, weight);
- _th_chN_hard->fill(eta, weight);
+ _tp_chEF_hard->fill(eta, energy);
+ _th_chN_hard->fill(eta);
}
if (isdiffEvt) {
- _tp_chEF_diff->fill(eta, energy, weight);
- _th_chN_diff->fill(eta, weight);
+ _tp_chEF_diff->fill(eta, energy);
+ _th_chN_diff->fill(eta);
} else {
- _tp_chEF_nondiff->fill(eta, energy, weight);
- _th_chN_nondiff->fill(eta, weight);
+ _tp_chEF_nondiff->fill(eta, energy);
+ _th_chN_nondiff->fill(eta);
}
}
}
}
void finalize() {
for (size_t i = 0; i < _s_totEF_minbias->numPoints(); ++i) {
const double val = _tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).height();
const double err = (_tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).heightErr() +
_tp_totEF_minbias->bin(i).stdErr() * _th_totN_minbias->bin(i).height());
_s_totEF_minbias->point(i).setY(val/_mbSumW, err/_mbSumW);
}
for (size_t i = 0; i < _s_totEF_hard->numPoints(); ++i) {
const double val = _tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).height();
const double err = (_tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).heightErr() +
_tp_totEF_hard->bin(i).stdErr() * _th_totN_hard->bin(i).height());
_s_totEF_hard->point(i).setY(val/_hdSumW, err/_hdSumW);
}
for (size_t i = 0; i < _s_totEF_diff->numPoints(); ++i) {
const double val = _tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).height();
const double err = (_tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).heightErr() +
_tp_totEF_diff->bin(i).stdErr() * _th_totN_diff->bin(i).height());
_s_totEF_diff->point(i).setY(val/_dfSumW, err/_dfSumW);
}
for (size_t i = 0; i < _s_totEF_nondiff->numPoints(); ++i) {
const double val = _tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).height();
const double err = (_tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).heightErr() +
_tp_totEF_nondiff->bin(i).stdErr() * _th_totN_nondiff->bin(i).height());
_s_totEF_nondiff->point(i).setY(val/_ndSumW, err/_ndSumW);
}
for (size_t i = 0; i < _s_chEF_minbias->numPoints(); ++i) {
const double val = _tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).height();
const double err = (_tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).heightErr() +
_tp_chEF_minbias->bin(i).stdErr() * _th_chN_minbias->bin(i).height());
_s_chEF_minbias->point(i).setY(val/_mbchSumW, err/_mbchSumW);
}
for (size_t i = 0; i < _s_chEF_hard->numPoints(); ++i) {
const double val = _tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).height();
const double err = (_tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).heightErr() +
_tp_chEF_hard->bin(i).stdErr() * _th_chN_hard->bin(i).height());
_s_chEF_hard->point(i).setY(val/_hdchSumW, err/_hdchSumW);
}
for (size_t i = 0; i < _s_chEF_diff->numPoints(); ++i) {
const double val = _tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).height();
const double err = (_tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).heightErr() +
_tp_chEF_diff->bin(i).stdErr() * _th_chN_diff->bin(i).height());
_s_chEF_diff->point(i).setY(val/_dfchSumW, err/_dfchSumW);
}
for (size_t i = 0; i < _s_chEF_nondiff->numPoints(); ++i) {
const double val = _tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).height();
const double err = (_tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).heightErr() +
_tp_chEF_nondiff->bin(i).stdErr() * _th_chN_nondiff->bin(i).height());
_s_chEF_nondiff->point(i).setY(val/_ndchSumW, err/_ndchSumW);
}
}
private:
/// @name Histograms and counters
///
/// @note Histograms correspond to charged and total EF for each class of events:
/// minimum bias, hard scattering, diffractive enriched and non-diffractive enriched.
//@{
// Scatters to be filled in finalize with 1/d_eta <N(eta)><E(eta)>
Scatter2DPtr _s_totEF_minbias, _s_totEF_hard, _s_totEF_diff, _s_totEF_nondiff;
Scatter2DPtr _s_chEF_minbias, _s_chEF_hard, _s_chEF_diff, _s_chEF_nondiff;
// Temp profiles containing <E(eta)>
- std::shared_ptr<YODA::Profile1D> _tp_totEF_minbias, _tp_totEF_hard, _tp_totEF_diff, _tp_totEF_nondiff;
- std::shared_ptr<YODA::Profile1D> _tp_chEF_minbias, _tp_chEF_hard, _tp_chEF_diff, _tp_chEF_nondiff;
+ Profile1DPtr _tp_totEF_minbias, _tp_totEF_hard, _tp_totEF_diff, _tp_totEF_nondiff;
+ Profile1DPtr _tp_chEF_minbias, _tp_chEF_hard, _tp_chEF_diff, _tp_chEF_nondiff;
// Temp profiles containing <N(eta)>
- std::shared_ptr<YODA::Histo1D> _th_totN_minbias, _th_totN_hard, _th_totN_diff, _th_totN_nondiff;
- std::shared_ptr<YODA::Histo1D> _th_chN_minbias, _th_chN_hard, _th_chN_diff, _th_chN_nondiff;
+ Histo1DPtr _th_totN_minbias, _th_totN_hard, _th_totN_diff, _th_totN_nondiff;
+ Histo1DPtr _th_chN_minbias, _th_chN_hard, _th_chN_diff, _th_chN_nondiff;
// Sums of weights (~ #events) in each event class
- double _mbSumW, _hdSumW, _dfSumW, _ndSumW;
- double _mbchSumW, _hdchSumW, _dfchSumW, _ndchSumW;
+ CounterPtr _mbSumW, _hdSumW, _dfSumW, _ndSumW;
+ CounterPtr _mbchSumW, _hdchSumW, _dfchSumW, _ndchSumW;
//@}
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2013_I1208105);
}
diff --git a/analyses/pluginLHCb/LHCB_2014_I1281685.cc b/analyses/pluginLHCb/LHCB_2014_I1281685.cc
--- a/analyses/pluginLHCb/LHCB_2014_I1281685.cc
+++ b/analyses/pluginLHCb/LHCB_2014_I1281685.cc
@@ -1,1178 +1,1177 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// Charged particle multiplicities and densities in $pp$ collisions at $\sqrt{s} = 7$ TeV
class LHCB_2014_I1281685 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2014_I1281685()
: Analysis("LHCB_2014_I1281685"),
_p_min(2.0),
_pt_min(0.2),
_eta_min(2.0),
_eta_max(4.8),
_maxlft(1.0e-11)
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
fillMap(_partLftMap);
// Projections
declare(ChargedFinalState(_eta_min, _eta_max, _pt_min*GeV), "CFS");
// Book histograms
book(_h_mult_total ,"d03-x01-y01", 50, 0.5, 50.5);
book(_h_mult_eta[0] ,"d04-x01-y01", 21, -0.5, 20.5); //eta=[2.0,2.5]
book(_h_mult_eta[1] ,"d04-x01-y02", 21, -0.5, 20.5); //eta=[2.5,3.0]
book(_h_mult_eta[2] ,"d04-x01-y03", 21, -0.5, 20.5); //eta=[3.0,3.5]
book(_h_mult_eta[3] ,"d04-x01-y04", 21, -0.5, 20.5); //eta=[3.5,4.0]
book(_h_mult_eta[4] ,"d04-x01-y05", 21, -0.5, 20.5); //eta=[4.0,4.5]
book(_h_mult_pt[0] ,"d05-x01-y01", 21, -0.5, 20.5); //pT=[0.2,0.3]GeV
book(_h_mult_pt[1] ,"d05-x01-y02", 21, -0.5, 20.5); //pT=[0.3,0.4]GeV
book(_h_mult_pt[2] ,"d05-x01-y03", 21, -0.5, 20.5); //pT=[0.4,0.6]GeV
book(_h_mult_pt[3] ,"d05-x01-y04", 21, -0.5, 20.5); //pT=[0.6,1.0]GeV
book(_h_mult_pt[4] ,"d05-x01-y05", 21, -0.5, 20.5); //pT=[1.0,2.0]GeV
book(_h_dndeta ,"d01-x01-y01", 14, 2.0, 4.8); //eta=[2,4.8]
book(_h_dndpt ,"d02-x01-y01", 18, 0.2, 2.0); //pT =[0,2]GeV
// Counters
- _sumW = 0;
+ book(_sumW, "TMP/sumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Variable to store multiplicities per event
int LHCbcountAll = 0; //count particles fulfiling all requirements
int LHCbcountEta[8] = {0,0,0,0,0,0,0,0}; //count per eta-bin
int LHCbcountPt[7] = {0,0,0,0,0,0,0}; //count per pT-bin
vector<double> val_dNdEta;
vector<double> val_dNdPt;
val_dNdEta.clear();
val_dNdPt.clear();
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
int id = p.pdgId();
// continue if particle is not a pion, kaon, proton, muon or electron
if ( !( (abs(id) == 211) || (abs(id) == 321) || (abs(id) == 2212) || (abs(id) == 13) || (abs(id) == 11)) ) {
continue;
}
const FourMomentum& qmom = p.momentum();
const double eta = p.momentum().eta();
const double pT = p.momentum().pT();
//minimum momentum
if (qmom.p3().mod() < _p_min) continue;
//minimum tr. momentum
if (pT < _pt_min) continue;
//eta range
if ((eta < _eta_min) || (eta > _eta_max)) continue;
/* Select only prompt particles via lifetime */
//Sum of all mother lifetimes (PDG lifetime) < 10ps
double ancestors_sumlft = getAncestorSumLifetime(p);
if( (ancestors_sumlft > _maxlft) || (ancestors_sumlft < 0) ) continue;
//after all cuts;
LHCbcountAll++; //count particles in whole kin. range
//in eta bins
if( eta >2.0 && eta <= 2.5) LHCbcountEta[0]++;
if( eta >2.5 && eta <= 3.0) LHCbcountEta[1]++;
if( eta >3.0 && eta <= 3.5) LHCbcountEta[2]++;
if( eta >3.5 && eta <= 4.0) LHCbcountEta[3]++;
if( eta >4.0 && eta <= 4.5) LHCbcountEta[4]++;
if( eta >2.0 && eta <= 4.8) LHCbcountEta[5]++; //cross-check
//in pT bins
if( pT > 0.2 && pT <= 0.3) LHCbcountPt[0]++;
if( pT > 0.3 && pT <= 0.4) LHCbcountPt[1]++;
if( pT > 0.4 && pT <= 0.6) LHCbcountPt[2]++;
if( pT > 0.6 && pT <= 1.0) LHCbcountPt[3]++;
if( pT > 1.0 && pT <= 2.0) LHCbcountPt[4]++;
if( pT > 0.2) LHCbcountPt[5]++; //cross-check
//particle densities -> need proper normalization (finalize)
val_dNdPt.push_back( pT );
val_dNdEta.push_back( eta );
}//end foreach
// Fill histograms only, if at least 1 particle pre event was within the
// kinematic range of the analysis!
if (LHCbcountAll) {
- const double weight = 1.0;
- _sumW += weight;
+ _sumW->fill();
- _h_mult_total->fill(LHCbcountAll, weight);
+ _h_mult_total->fill(LHCbcountAll);
- _h_mult_eta[0]->fill(LHCbcountEta[0], weight);
- _h_mult_eta[1]->fill(LHCbcountEta[1], weight);
- _h_mult_eta[2]->fill(LHCbcountEta[2], weight);
- _h_mult_eta[3]->fill(LHCbcountEta[3], weight);
- _h_mult_eta[4]->fill(LHCbcountEta[4], weight);
+ _h_mult_eta[0]->fill(LHCbcountEta[0]);
+ _h_mult_eta[1]->fill(LHCbcountEta[1]);
+ _h_mult_eta[2]->fill(LHCbcountEta[2]);
+ _h_mult_eta[3]->fill(LHCbcountEta[3]);
+ _h_mult_eta[4]->fill(LHCbcountEta[4]);
- _h_mult_pt[0]->fill(LHCbcountPt[0], weight);
- _h_mult_pt[1]->fill(LHCbcountPt[1], weight);
- _h_mult_pt[2]->fill(LHCbcountPt[2], weight);
- _h_mult_pt[3]->fill(LHCbcountPt[3], weight);
- _h_mult_pt[4]->fill(LHCbcountPt[4], weight);
+ _h_mult_pt[0]->fill(LHCbcountPt[0]);
+ _h_mult_pt[1]->fill(LHCbcountPt[1]);
+ _h_mult_pt[2]->fill(LHCbcountPt[2]);
+ _h_mult_pt[3]->fill(LHCbcountPt[3]);
+ _h_mult_pt[4]->fill(LHCbcountPt[4]);
for (size_t part = 0; part < val_dNdEta.size(); part++)
- _h_dndeta->fill(val_dNdEta[part], weight);
+ _h_dndeta->fill(val_dNdEta[part]);
for (size_t part = 0; part < val_dNdPt.size(); part++)
- _h_dndpt->fill(val_dNdPt[part], weight);
+ _h_dndpt->fill(val_dNdPt[part]);
}
}
/// Normalise histograms etc., after the run
void finalize() {
- const double scalefactor = 1.0/_sumW; // normalize multiplicity histograms by nEvents
+ const double scalefactor = 1.0/_sumW->sumW(); // normalize multiplicity histograms by nEvents
const double scale1k = 1000.; // to match '10^3' scale in reference histograms
scale( _h_dndeta, scalefactor );
scale( _h_dndpt, scalefactor*0.1 ); //additional factor 0.1 for [0.1 GeV/c]
scale( _h_mult_total, scalefactor*scale1k);
_h_mult_eta[0]->scaleW( scalefactor*scale1k );
_h_mult_eta[1]->scaleW( scalefactor*scale1k );
_h_mult_eta[2]->scaleW( scalefactor*scale1k );
_h_mult_eta[3]->scaleW( scalefactor*scale1k );
_h_mult_eta[4]->scaleW( scalefactor*scale1k );
_h_mult_pt[0]->scaleW( scalefactor*scale1k );
_h_mult_pt[1]->scaleW( scalefactor*scale1k );
_h_mult_pt[2]->scaleW( scalefactor*scale1k );
_h_mult_pt[3]->scaleW( scalefactor*scale1k );
_h_mult_pt[4]->scaleW( scalefactor*scale1k );
}
//@}
private:
// Get mean PDG lifetime for particle with PID
double getLifetime(int pid) {
double lft = 0.;
map<int, double>::iterator pPartLft = _partLftMap.find(pid);
if (pPartLft != _partLftMap.end()) {
lft = (*pPartLft).second;
} else {
// allow identifying missing life times only in debug mode
MSG_DEBUG("Could not determine lifetime for particle with PID " << pid << "... Assume non-prompt particle");
lft = -1;
}
return lft;
}
// Get sum of all ancestor particles
const double getAncestorSumLifetime(const Particle& p) {
double lftSum = 0.;
double plft = 0.;
const GenParticle* part = p.genParticle();
if ( 0 == part ) return -1;
const GenVertex* ivtx = part->production_vertex();
while(ivtx) {
if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; };
const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin();
part = (*iPart_invtx);
if ( !(part) ) { lftSum = -1.; break; };
ivtx = part->production_vertex();
if ( (part->pdg_id() == 2212) || !(ivtx) ) break; // reached beam
plft = getLifetime(part->pdg_id());
if (plft < 0.) { lftSum = -1.; break; };
lftSum += plft;
}
return (lftSum);
}
/// Hard-coded map linking PDG ID with PDG lifetime[s] (converted from ParticleTable.txt)
bool fillMap(map<int, double>& m) {
// PDGID = LIFETIME
m[22] = 1.000000e+016;
m[-11] = 1.000000e+016;
m[11] = 1.000000e+016;
m[12] = 1.000000e+016;
m[-13] = 2.197036e-006;
m[13] = 2.197036e-006;
m[111] = 8.438618e-017;
m[211] = 2.603276e-008;
m[-211] = 2.603276e-008;
m[130] = 5.174624e-008;
m[321] = 1.238405e-008;
m[-321] = 1.238405e-008;
m[2112] = 885.646128;
m[2212] = 1.000000e+016;
m[-2212] = 1.000000e+016;
m[310] = 8.934603e-011;
m[221] = 5.578070e-019;
m[3122] = 2.631796e-010;
m[3222] = 8.018178e-011;
m[3212] = 7.395643e-020;
m[3112] = 1.479129e-010;
m[3322] = 2.899613e-010;
m[3312] = 1.637344e-010;
m[3334] = 8.207135e-011;
m[-2112] = 885.646128;
m[-3122] = 2.631796e-010;
m[-3222] = 8.018178e-011;
m[-3212] = 7.395643e-020;
m[-3112] = 1.479129e-010;
m[-3322] = 2.899613e-010;
m[-3312] = 1.637344e-010;
m[-3334] = 8.207135e-011;
m[113] = 4.411610e-024;
m[213] = 4.411610e-024;
m[-213] = 4.411610e-024;
m[223] = 7.798723e-023;
m[333] = 1.545099e-022;
m[323] = 1.295693e-023;
m[-323] = 1.295693e-023;
m[313] = 1.298249e-023;
m[-313] = 1.298249e-023;
m[20213] = 1.500000e-024;
m[-20213] = 1.500000e-024;
m[450000000] = 1.000000e+015;
m[460000000] = 1.000000e+015;
m[470000000] = 1.000000e+015;
m[480000000] = 1.000000e+015;
m[490000000] = 1.000000e+015;
m[20022] = 1.000000e+016;
m[-15] = 2.906014e-013;
m[15] = 2.906014e-013;
m[24] = 3.104775e-025;
m[-24] = 3.104775e-025;
m[23] = 2.637914e-025;
m[411] = 1.051457e-012;
m[-411] = 1.051457e-012;
m[421] = 4.116399e-013;
m[-421] = 4.116399e-013;
m[431] = 4.904711e-013;
m[-431] = 4.904711e-013;
m[4122] = 1.994582e-013;
m[-4122] = 1.994582e-013;
m[443] = 7.565657e-021;
m[413] = 6.856377e-021;
m[-413] = 6.856377e-021;
m[423] = 1.000003e-019;
m[-423] = 1.000003e-019;
m[433] = 1.000003e-019;
m[-433] = 1.000003e-019;
m[521] = 1.671000e-012;
m[-521] = 1.671000e-012;
m[511] = 1.536000e-012;
m[-511] = 1.536000e-012;
m[531] = 1.461000e-012;
m[-531] = 1.461000e-012;
m[541] = 4.600000e-013;
m[-541] = 4.600000e-013;
m[5122] = 1.229000e-012;
m[-5122] = 1.229000e-012;
m[4112] = 4.388081e-022;
m[-4112] = 4.388081e-022;
m[4212] = 3.999999e-022;
m[-4212] = 3.999999e-022;
m[4222] = 3.291060e-022;
m[-4222] = 3.291060e-022;
m[25] = 9.400000e-026;
m[35] = 9.400000e-026;
m[36] = 9.400000e-026;
m[37] = 9.400000e-026;
m[-37] = 9.400000e-026;
m[4312] = 9.800002e-014;
m[-4312] = 9.800002e-014;
m[4322] = 3.500001e-013;
m[-4322] = 3.500001e-013;
m[4332] = 6.453061e-014;
m[-4332] = 6.453061e-014;
m[4132] = 9.824063e-014;
m[-4132] = 9.824063e-014;
m[4232] = 4.417532e-013;
m[-4232] = 4.417532e-013;
m[5222] = 1.000000e-019;
m[-5222] = 1.000000e-019;
m[5212] = 1.000000e-019;
m[-5212] = 1.000000e-019;
m[5112] = 1.000000e-019;
m[-5112] = 1.000000e-019;
m[5312] = 1.000000e-019;
m[-5312] = 1.000000e-019;
m[5322] = 1.000000e-019;
m[-5322] = 1.000000e-019;
m[5332] = 1.550000e-012;
m[-5332] = 1.550000e-012;
m[5132] = 1.390000e-012;
m[-5132] = 1.390000e-012;
m[5232] = 1.390000e-012;
m[-5232] = 1.390000e-012;
m[100443] = 2.194041e-021;
m[331] = 3.258476e-021;
m[441] = 4.113826e-023;
m[10441] = 4.063038e-023;
m[20443] = 7.154480e-022;
m[445] = 3.164482e-022;
m[9000111] = 1.149997e-023;
m[9000211] = 1.149997e-023;
m[-9000211] = 1.149997e-023;
m[20113] = 1.500000e-024;
m[115] = 6.151516e-024;
m[215] = 6.151516e-024;
m[-215] = 6.151516e-024;
m[10323] = 7.313469e-024;
m[-10323] = 7.313469e-024;
m[10313] = 7.313469e-024;
m[-10313] = 7.313469e-024;
m[20323] = 3.782829e-024;
m[-20323] = 3.782829e-024;
m[20313] = 3.782829e-024;
m[-20313] = 3.782829e-024;
m[10321] = 2.238817e-024;
m[-10321] = 2.238817e-024;
m[10311] = 2.238817e-024;
m[-10311] = 2.238817e-024;
m[325] = 6.682357e-024;
m[-325] = 6.682357e-024;
m[315] = 6.038644e-024;
m[-315] = 6.038644e-024;
m[10411] = 4.380000e-024;
m[20413] = 2.630000e-024;
m[10413] = 3.290000e-023;
m[-415] = 2.632849e-023;
m[-10411] = 4.380000e-024;
m[-20413] = 2.630000e-024;
m[-10413] = 3.290000e-023;
m[415] = 2.632849e-023;
m[10421] = 4.380000e-024;
m[20423] = 2.630000e-024;
m[10423] = 3.482604e-023;
m[-425] = 2.861792e-023;
m[-10421] = 4.380000e-024;
m[-20423] = 2.630000e-024;
m[-10423] = 3.482604e-023;
m[425] = 2.861792e-023;
m[10431] = 6.582100e-022;
m[20433] = 6.582100e-022;
m[10433] = 6.582100e-022;
m[435] = 4.388100e-023;
m[-10431] = 6.582100e-022;
m[-20433] = 6.582100e-022;
m[-10433] = 6.582100e-022;
m[-435] = 4.388100e-023;
m[2224] = 5.485102e-024;
m[2214] = 5.485102e-024;
m[2114] = 5.485102e-024;
m[1114] = 5.485102e-024;
m[-2224] = 5.485102e-024;
m[-2214] = 5.485102e-024;
m[-2114] = 5.485102e-024;
m[-1114] = 5.485102e-024;
m[-523] = 1.000019e-019;
m[523] = 1.000019e-019;
m[513] = 1.000019e-019;
m[-513] = 1.000019e-019;
m[533] = 1.000000e-019;
m[-533] = 1.000000e-019;
m[10521] = 4.390000e-024;
m[20523] = 2.630000e-024;
m[10523] = 1.650000e-023;
m[525] = 1.310000e-023;
m[-10521] = 4.390000e-024;
m[-20523] = 2.630000e-024;
m[-10523] = 1.650000e-023;
m[-525] = 1.310000e-023;
m[10511] = 4.390000e-024;
m[20513] = 2.630000e-024;
m[10513] = 1.650000e-023;
m[515] = 1.310000e-023;
m[-10511] = 4.390000e-024;
m[-20513] = 2.630000e-024;
m[-10513] = 1.650000e-023;
m[-515] = 1.310000e-023;
m[10531] = 4.390000e-024;
m[20533] = 2.630000e-024;
m[10533] = 1.650000e-023;
m[535] = 1.310000e-023;
m[-10531] = 4.390000e-024;
m[-20533] = 2.630000e-024;
m[-10533] = 1.650000e-023;
m[-535] = 1.310000e-023;
m[14] = 1.000000e+016;
m[-14] = 1.000000e+016;
m[-12] = 1.000000e+016;
m[1] = 0.000000e+000;
m[-1] = 0.000000e+000;
m[2] = 0.000000e+000;
m[-2] = 0.000000e+000;
m[3] = 0.000000e+000;
m[-3] = 0.000000e+000;
m[4] = 0.000000e+000;
m[-4] = 0.000000e+000;
m[5] = 0.000000e+000;
m[-5] = 0.000000e+000;
m[6] = 4.707703e-025;
m[-6] = 4.707703e-025;
m[7] = 0.000000e+000;
m[-7] = 0.000000e+000;
m[8] = 0.000000e+000;
m[-8] = 0.000000e+000;
m[16] = 1.000000e+016;
m[-16] = 1.000000e+016;
m[17] = 0.000000e+000;
m[-17] = 0.000000e+000;
m[18] = 0.000000e+000;
m[-18] = 0.000000e+000;
m[21] = 0.000000e+000;
m[32] = 0.000000e+000;
m[33] = 0.000000e+000;
m[34] = 0.000000e+000;
m[-34] = 0.000000e+000;
m[39] = 0.000000e+000;
m[41] = 0.000000e+000;
m[-41] = 0.000000e+000;
m[42] = 0.000000e+000;
m[-42] = 0.000000e+000;
m[43] = 0.000000e+000;
m[44] = 0.000000e+000;
m[-44] = 0.000000e+000;
m[81] = 0.000000e+000;
m[82] = 0.000000e+000;
m[-82] = 0.000000e+000;
m[83] = 0.000000e+000;
m[84] = 3.335641e-013;
m[-84] = 3.335641e-013;
m[85] = 1.290893e-012;
m[-85] = 1.290893e-012;
m[86] = 0.000000e+000;
m[-86] = 0.000000e+000;
m[87] = 0.000000e+000;
m[-87] = 0.000000e+000;
m[88] = 0.000000e+000;
m[90] = 0.000000e+000;
m[91] = 0.000000e+000;
m[92] = 0.000000e+000;
m[93] = 0.000000e+000;
m[94] = 0.000000e+000;
m[95] = 0.000000e+000;
m[96] = 0.000000e+000;
m[97] = 0.000000e+000;
m[98] = 0.000000e+000;
m[99] = 0.000000e+000;
m[117] = 4.088275e-024;
m[119] = 1.828367e-024;
m[217] = 4.088275e-024;
m[-217] = 4.088275e-024;
m[219] = 1.828367e-024;
m[-219] = 1.828367e-024;
m[225] = 3.555982e-024;
m[227] = 3.917930e-024;
m[229] = 3.392846e-024;
m[311] = 1.000000e+016;
m[-311] = 1.000000e+016;
m[317] = 4.139699e-024;
m[-317] = 4.139699e-024;
m[319] = 3.324304e-024;
m[-319] = 3.324304e-024;
m[327] = 4.139699e-024;
m[-327] = 4.139699e-024;
m[329] = 3.324304e-024;
m[-329] = 3.324304e-024;
m[335] = 8.660687e-024;
m[337] = 7.565657e-024;
m[543] = 0.000000e+000;
m[-543] = 0.000000e+000;
m[545] = 0.000000e+000;
m[-545] = 0.000000e+000;
m[551] = 0.000000e+000;
m[553] = 1.253738e-020;
m[555] = 1.000000e+016;
m[557] = 0.000000e+000;
m[-450000000] = 0.000000e+000;
m[-490000000] = 0.000000e+000;
m[-460000000] = 0.000000e+000;
m[-470000000] = 0.000000e+000;
m[1103] = 0.000000e+000;
m[-1103] = 0.000000e+000;
m[1112] = 4.388081e-024;
m[-1112] = 4.388081e-024;
m[1116] = 1.880606e-024;
m[-1116] = 1.880606e-024;
m[1118] = 2.194041e-024;
m[-1118] = 2.194041e-024;
m[1212] = 4.388081e-024;
m[-1212] = 4.388081e-024;
m[1214] = 5.485102e-024;
m[-1214] = 5.485102e-024;
m[1216] = 1.880606e-024;
m[-1216] = 1.880606e-024;
m[1218] = 1.462694e-024;
m[-1218] = 1.462694e-024;
m[2101] = 0.000000e+000;
m[-2101] = 0.000000e+000;
m[2103] = 0.000000e+000;
m[-2103] = 0.000000e+000;
m[2116] = 4.388081e-024;
m[-2116] = 4.388081e-024;
m[2118] = 2.194041e-024;
m[-2118] = 2.194041e-024;
m[2122] = 4.388081e-024;
m[-2122] = 4.388081e-024;
m[2124] = 5.485102e-024;
m[-2124] = 5.485102e-024;
m[2126] = 1.880606e-024;
m[-2126] = 1.880606e-024;
m[2128] = 1.462694e-024;
m[-2128] = 1.462694e-024;
m[2203] = 0.000000e+000;
m[-2203] = 0.000000e+000;
m[2216] = 4.388081e-024;
m[-2216] = 4.388081e-024;
m[2218] = 2.194041e-024;
m[-2218] = 2.194041e-024;
m[2222] = 4.388081e-024;
m[-2222] = 4.388081e-024;
m[2226] = 1.880606e-024;
m[-2226] = 1.880606e-024;
m[2228] = 2.194041e-024;
m[-2228] = 2.194041e-024;
m[3101] = 0.000000e+000;
m[-3101] = 0.000000e+000;
m[3103] = 0.000000e+000;
m[-3103] = 0.000000e+000;
m[3114] = 1.670589e-023;
m[-3114] = 1.670589e-023;
m[3116] = 5.485102e-024;
m[-3116] = 5.485102e-024;
m[3118] = 3.656734e-024;
m[-3118] = 3.656734e-024;
m[3124] = 4.219309e-023;
m[-3124] = 4.219309e-023;
m[3126] = 8.227653e-024;
m[-3126] = 8.227653e-024;
m[3128] = 3.291061e-024;
m[-3128] = 3.291061e-024;
m[3201] = 0.000000e+000;
m[-3201] = 0.000000e+000;
m[3203] = 0.000000e+000;
m[-3203] = 0.000000e+000;
m[3214] = 1.828367e-023;
m[-3214] = 1.828367e-023;
m[3216] = 5.485102e-024;
m[-3216] = 5.485102e-024;
m[3218] = 3.656734e-024;
m[-3218] = 3.656734e-024;
m[3224] = 1.838582e-023;
m[-3224] = 1.838582e-023;
m[3226] = 5.485102e-024;
m[-3226] = 5.485102e-024;
m[3228] = 3.656734e-024;
m[-3228] = 3.656734e-024;
m[3303] = 0.000000e+000;
m[-3303] = 0.000000e+000;
m[3314] = 6.648608e-023;
m[-3314] = 6.648608e-023;
m[3324] = 7.233101e-023;
m[-3324] = 7.233101e-023;
m[4101] = 0.000000e+000;
m[-4101] = 0.000000e+000;
m[4103] = 0.000000e+000;
m[-4103] = 0.000000e+000;
m[4114] = 0.000000e+000;
m[-4114] = 0.000000e+000;
m[4201] = 0.000000e+000;
m[-4201] = 0.000000e+000;
m[4203] = 0.000000e+000;
m[-4203] = 0.000000e+000;
m[4214] = 3.291061e-022;
m[-4214] = 3.291061e-022;
m[4224] = 0.000000e+000;
m[-4224] = 0.000000e+000;
m[4301] = 0.000000e+000;
m[-4301] = 0.000000e+000;
m[4303] = 0.000000e+000;
m[-4303] = 0.000000e+000;
m[4314] = 0.000000e+000;
m[-4314] = 0.000000e+000;
m[4324] = 0.000000e+000;
m[-4324] = 0.000000e+000;
m[4334] = 0.000000e+000;
m[-4334] = 0.000000e+000;
m[4403] = 0.000000e+000;
m[-4403] = 0.000000e+000;
m[4412] = 3.335641e-013;
m[-4412] = 3.335641e-013;
m[4414] = 3.335641e-013;
m[-4414] = 3.335641e-013;
m[4422] = 3.335641e-013;
m[-4422] = 3.335641e-013;
m[4424] = 3.335641e-013;
m[-4424] = 3.335641e-013;
m[4432] = 3.335641e-013;
m[-4432] = 3.335641e-013;
m[4434] = 3.335641e-013;
m[-4434] = 3.335641e-013;
m[4444] = 3.335641e-013;
m[-4444] = 3.335641e-013;
m[5101] = 0.000000e+000;
m[-5101] = 0.000000e+000;
m[5103] = 0.000000e+000;
m[-5103] = 0.000000e+000;
m[5114] = 0.000000e+000;
m[-5114] = 0.000000e+000;
m[5142] = 1.290893e-012;
m[-5142] = 1.290893e-012;
m[5201] = 0.000000e+000;
m[-5201] = 0.000000e+000;
m[5203] = 0.000000e+000;
m[-5203] = 0.000000e+000;
m[5214] = 0.000000e+000;
m[-5214] = 0.000000e+000;
m[5224] = 0.000000e+000;
m[-5224] = 0.000000e+000;
m[5242] = 1.290893e-012;
m[-5242] = 1.290893e-012;
m[5301] = 0.000000e+000;
m[-5301] = 0.000000e+000;
m[5303] = 0.000000e+000;
m[-5303] = 0.000000e+000;
m[5314] = 0.000000e+000;
m[-5314] = 0.000000e+000;
m[5324] = 0.000000e+000;
m[-5324] = 0.000000e+000;
m[5334] = 0.000000e+000;
m[-5334] = 0.000000e+000;
m[5342] = 1.290893e-012;
m[-5342] = 1.290893e-012;
m[5401] = 0.000000e+000;
m[-5401] = 0.000000e+000;
m[5403] = 0.000000e+000;
m[-5403] = 0.000000e+000;
m[5412] = 1.290893e-012;
m[-5412] = 1.290893e-012;
m[5414] = 1.290893e-012;
m[-5414] = 1.290893e-012;
m[5422] = 1.290893e-012;
m[-5422] = 1.290893e-012;
m[5424] = 1.290893e-012;
m[-5424] = 1.290893e-012;
m[5432] = 1.290893e-012;
m[-5432] = 1.290893e-012;
m[5434] = 1.290893e-012;
m[-5434] = 1.290893e-012;
m[5442] = 1.290893e-012;
m[-5442] = 1.290893e-012;
m[5444] = 1.290893e-012;
m[-5444] = 1.290893e-012;
m[5503] = 0.000000e+000;
m[-5503] = 0.000000e+000;
m[5512] = 1.290893e-012;
m[-5512] = 1.290893e-012;
m[5514] = 1.290893e-012;
m[-5514] = 1.290893e-012;
m[5522] = 1.290893e-012;
m[-5522] = 1.290893e-012;
m[5524] = 1.290893e-012;
m[-5524] = 1.290893e-012;
m[5532] = 1.290893e-012;
m[-5532] = 1.290893e-012;
m[5534] = 1.290893e-012;
m[-5534] = 1.290893e-012;
m[5542] = 1.290893e-012;
m[-5542] = 1.290893e-012;
m[5544] = 1.290893e-012;
m[-5544] = 1.290893e-012;
m[5554] = 1.290893e-012;
m[-5554] = 1.290893e-012;
m[10022] = 0.000000e+000;
m[10111] = 2.483820e-024;
m[10113] = 4.635297e-024;
m[10115] = 2.541360e-024;
m[10211] = 2.483820e-024;
m[-10211] = 2.483820e-024;
m[10213] = 4.635297e-024;
m[-10213] = 4.635297e-024;
m[10215] = 2.541360e-024;
m[-10215] = 2.541360e-024;
m[9010221] = 1.316424e-023;
m[10223] = 1.828367e-024;
m[10225] = 0.000000e+000;
m[10315] = 3.538775e-024;
m[-10315] = 3.538775e-024;
m[10325] = 3.538775e-024;
m[-10325] = 3.538775e-024;
m[10331] = 5.265698e-024;
m[10333] = 0.000000e+000;
m[10335] = 0.000000e+000;
m[10443] = 0.000000e+000;
m[10541] = 0.000000e+000;
m[-10541] = 0.000000e+000;
m[10543] = 0.000000e+000;
m[-10543] = 0.000000e+000;
m[10551] = 1.000000e+016;
m[10553] = 0.000000e+000;
m[10555] = 0.000000e+000;
m[11112] = 0.000000e+000;
m[-11112] = 0.000000e+000;
m[11114] = 2.194041e-024;
m[-11114] = 2.194041e-024;
m[11116] = 1.880606e-024;
m[-11116] = 1.880606e-024;
m[11212] = 1.880606e-024;
m[-11212] = 1.880606e-024;
m[11216] = 0.000000e+000;
m[-11216] = 0.000000e+000;
m[12112] = 1.880606e-024;
m[-12112] = 1.880606e-024;
m[12114] = 2.194041e-024;
m[-12114] = 2.194041e-024;
m[12116] = 5.063171e-024;
m[-12116] = 5.063171e-024;
m[12118] = 0.000000e+000;
m[-12118] = 0.000000e+000;
m[12122] = 0.000000e+000;
m[-12122] = 0.000000e+000;
m[12126] = 1.880606e-024;
m[-12126] = 1.880606e-024;
m[12212] = 1.880606e-024;
m[-12212] = 1.880606e-024;
m[12214] = 2.194041e-024;
m[-12214] = 2.194041e-024;
m[12216] = 5.063171e-024;
m[-12216] = 5.063171e-024;
m[12218] = 0.000000e+000;
m[-12218] = 0.000000e+000;
m[12222] = 0.000000e+000;
m[-12222] = 0.000000e+000;
m[12224] = 2.194041e-024;
m[-12224] = 2.194041e-024;
m[12226] = 1.880606e-024;
m[-12226] = 1.880606e-024;
m[13112] = 6.582122e-024;
m[-13112] = 6.582122e-024;
m[13114] = 1.097020e-023;
m[-13114] = 1.097020e-023;
m[13116] = 5.485102e-024;
m[-13116] = 5.485102e-024;
m[13122] = 1.316424e-023;
m[-13122] = 1.316424e-023;
m[13124] = 1.097020e-023;
m[-13124] = 1.097020e-023;
m[13126] = 6.928549e-024;
m[-13126] = 6.928549e-024;
m[13212] = 6.582122e-024;
m[-13212] = 6.582122e-024;
m[13214] = 1.097020e-023;
m[-13214] = 1.097020e-023;
m[13216] = 5.485102e-024;
m[-13216] = 5.485102e-024;
m[13222] = 6.582122e-024;
m[-13222] = 6.582122e-024;
m[13224] = 1.097020e-023;
m[-13224] = 1.097020e-023;
m[13226] = 5.485102e-024;
m[-13226] = 5.485102e-024;
m[13314] = 2.742551e-023;
m[-13314] = 2.742551e-023;
m[13316] = 0.000000e+000;
m[-13316] = 0.000000e+000;
m[13324] = 2.742551e-023;
m[-13324] = 2.742551e-023;
m[13326] = 0.000000e+000;
m[-13326] = 0.000000e+000;
m[14122] = 1.828367e-022;
m[-14122] = 1.828367e-022;
m[14124] = 0.000000e+000;
m[-14124] = 0.000000e+000;
m[10221] = 2.194040e-024;
m[20223] = 2.742551e-023;
m[20315] = 2.384827e-024;
m[-20315] = 2.384827e-024;
m[20325] = 2.384827e-024;
m[-20325] = 2.384827e-024;
m[20333] = 1.185968e-023;
m[20543] = 0.000000e+000;
m[-20543] = 0.000000e+000;
m[20553] = 1.000000e+016;
m[20555] = 0.000000e+000;
m[21112] = 2.632849e-024;
m[-21112] = 2.632849e-024;
m[21114] = 3.291061e-024;
m[-21114] = 3.291061e-024;
m[21212] = 2.632849e-024;
m[-21212] = 2.632849e-024;
m[21214] = 6.582122e-024;
m[-21214] = 6.582122e-024;
m[22112] = 4.388081e-024;
m[-22112] = 4.388081e-024;
m[22114] = 3.291061e-024;
m[-22114] = 3.291061e-024;
m[22122] = 2.632849e-024;
m[-22122] = 2.632849e-024;
m[22124] = 6.582122e-024;
m[-22124] = 6.582122e-024;
m[22212] = 4.388081e-024;
m[-22212] = 4.388081e-024;
m[22214] = 3.291061e-024;
m[-22214] = 3.291061e-024;
m[22222] = 2.632849e-024;
m[-22222] = 2.632849e-024;
m[22224] = 3.291061e-024;
m[-22224] = 3.291061e-024;
m[23112] = 7.313469e-024;
m[-23112] = 7.313469e-024;
m[23114] = 2.991874e-024;
m[-23114] = 2.991874e-024;
m[23122] = 4.388081e-024;
m[-23122] = 4.388081e-024;
m[23124] = 6.582122e-024;
m[-23124] = 6.582122e-024;
m[23126] = 3.291061e-024;
m[-23126] = 3.291061e-024;
m[23212] = 7.313469e-024;
m[-23212] = 7.313469e-024;
m[23214] = 2.991874e-024;
m[-23214] = 2.991874e-024;
m[23222] = 7.313469e-024;
m[-23222] = 7.313469e-024;
m[23224] = 2.991874e-024;
m[-23224] = 2.991874e-024;
m[23314] = 0.000000e+000;
m[-23314] = 0.000000e+000;
m[23324] = 0.000000e+000;
m[-23324] = 0.000000e+000;
m[30113] = 2.742551e-024;
m[30213] = 2.742551e-024;
m[-30213] = 2.742551e-024;
m[30223] = 2.991874e-024;
m[30313] = 2.056913e-024;
m[-30313] = 2.056913e-024;
m[30323] = 2.056913e-024;
m[-30323] = 2.056913e-024;
m[30343] = 0.000000e+000;
m[-30343] = 0.000000e+000;
m[30353] = 0.000000e+000;
m[-30353] = 0.000000e+000;
m[30363] = 0.000000e+000;
m[-30363] = 0.000000e+000;
m[30411] = 0.000000e+000;
m[-30411] = 0.000000e+000;
m[30413] = 0.000000e+000;
m[-30413] = 0.000000e+000;
m[30421] = 0.000000e+000;
m[-30421] = 0.000000e+000;
m[30423] = 0.000000e+000;
m[-30423] = 0.000000e+000;
m[30443] = 2.789035e-023;
m[30553] = 0.000000e+000;
m[31114] = 1.880606e-024;
m[-31114] = 1.880606e-024;
m[31214] = 4.388081e-024;
m[-31214] = 4.388081e-024;
m[32112] = 4.388081e-024;
m[-32112] = 4.388081e-024;
m[32114] = 1.880606e-024;
m[-32114] = 1.880606e-024;
m[32124] = 4.388081e-024;
m[-32124] = 4.388081e-024;
m[32212] = 4.388081e-024;
m[-32212] = 4.388081e-024;
m[32214] = 1.880606e-024;
m[-32214] = 1.880606e-024;
m[32224] = 1.880606e-024;
m[-32224] = 1.880606e-024;
m[33122] = 1.880606e-023;
m[-33122] = 1.880606e-023;
m[33314] = 0.000000e+000;
m[-33314] = 0.000000e+000;
m[33324] = 0.000000e+000;
m[-33324] = 0.000000e+000;
m[41214] = 0.000000e+000;
m[-41214] = 0.000000e+000;
m[42112] = 6.582122e-024;
m[-42112] = 6.582122e-024;
m[42124] = 0.000000e+000;
m[-42124] = 0.000000e+000;
m[42212] = 6.582122e-024;
m[-42212] = 6.582122e-024;
m[43122] = 2.194041e-024;
m[-43122] = 2.194041e-024;
m[52114] = 0.000000e+000;
m[-52114] = 0.000000e+000;
m[52214] = 0.000000e+000;
m[-52214] = 0.000000e+000;
m[53122] = 4.388081e-024;
m[-53122] = 4.388081e-024;
m[100111] = 1.645531e-024;
m[100113] = 2.123265e-024;
m[100211] = 1.645531e-024;
m[-100211] = 1.645531e-024;
m[100213] = 2.123265e-024;
m[-100213] = 2.123265e-024;
m[100221] = 1.196749e-023;
m[100223] = 3.871836e-024;
m[100225] = 0.000000e+000;
m[100311] = 0.000000e+000;
m[-100311] = 0.000000e+000;
m[100313] = 2.837122e-024;
m[-100313] = 2.837122e-024;
m[100315] = 0.000000e+000;
m[-100315] = 0.000000e+000;
m[100321] = 0.000000e+000;
m[-100321] = 0.000000e+000;
m[100323] = 2.837122e-024;
m[-100323] = 2.837122e-024;
m[100325] = 0.000000e+000;
m[-100325] = 0.000000e+000;
m[100331] = 0.000000e+000;
m[100333] = 4.388081e-024;
m[100335] = 3.291061e-024;
m[100441] = 0.000000e+000;
m[100551] = 0.000000e+000;
m[100553] = 1.495937e-020;
m[100555] = 1.000000e+016;
m[100557] = 0.000000e+000;
m[110551] = 1.000000e+016;
m[110553] = 0.000000e+000;
m[110555] = 0.000000e+000;
m[120553] = 1.000000e+016;
m[120555] = 0.000000e+000;
m[130553] = 0.000000e+000;
m[200111] = 3.134344e-024;
m[200211] = 3.134344e-024;
m[-200211] = 3.134344e-024;
m[200551] = 0.000000e+000;
m[200553] = 2.502708e-020;
m[200555] = 0.000000e+000;
m[210551] = 0.000000e+000;
m[210553] = 0.000000e+000;
m[220553] = 0.000000e+000;
m[300553] = 4.701516e-023;
m[9000221] = 0.000000e+000;
m[9000443] = 1.265793e-023;
m[9000553] = 5.983747e-024;
m[9010443] = 8.438618e-024;
m[9010553] = 8.331800e-024;
m[9020221] = 6.038644e-024;
m[9020443] = 1.530726e-023;
m[9060225] = 4.388081e-024;
m[9070225] = 2.056913e-024;
m[1000001] = 0.000000e+000;
m[-1000001] = 0.000000e+000;
m[1000002] = 0.000000e+000;
m[-1000002] = 0.000000e+000;
m[1000003] = 0.000000e+000;
m[-1000003] = 0.000000e+000;
m[1000004] = 0.000000e+000;
m[-1000004] = 0.000000e+000;
m[1000005] = 0.000000e+000;
m[-1000005] = 0.000000e+000;
m[1000006] = 0.000000e+000;
m[-1000006] = 0.000000e+000;
m[1000011] = 0.000000e+000;
m[-1000011] = 0.000000e+000;
m[1000012] = 0.000000e+000;
m[-1000012] = 0.000000e+000;
m[1000013] = 0.000000e+000;
m[-1000013] = 0.000000e+000;
m[1000014] = 0.000000e+000;
m[-1000014] = 0.000000e+000;
m[1000015] = 0.000000e+000;
m[-1000015] = 0.000000e+000;
m[1000016] = 0.000000e+000;
m[-1000016] = 0.000000e+000;
m[1000021] = 0.000000e+000;
m[1000022] = 0.000000e+000;
m[1000023] = 0.000000e+000;
m[1000024] = 0.000000e+000;
m[-1000024] = 0.000000e+000;
m[1000025] = 0.000000e+000;
m[1000035] = 0.000000e+000;
m[1000037] = 0.000000e+000;
m[-1000037] = 0.000000e+000;
m[1000039] = 0.000000e+000;
m[2000001] = 0.000000e+000;
m[-2000001] = 0.000000e+000;
m[2000002] = 0.000000e+000;
m[-2000002] = 0.000000e+000;
m[2000003] = 0.000000e+000;
m[-2000003] = 0.000000e+000;
m[2000004] = 0.000000e+000;
m[-2000004] = 0.000000e+000;
m[2000005] = 0.000000e+000;
m[-2000005] = 0.000000e+000;
m[2000006] = 0.000000e+000;
m[-2000006] = 0.000000e+000;
m[2000011] = 0.000000e+000;
m[-2000011] = 0.000000e+000;
m[2000012] = 0.000000e+000;
m[-2000012] = 0.000000e+000;
m[2000013] = 0.000000e+000;
m[-2000013] = 0.000000e+000;
m[2000014] = 0.000000e+000;
m[-2000014] = 0.000000e+000;
m[2000015] = 0.000000e+000;
m[-2000015] = 0.000000e+000;
m[2000016] = 0.000000e+000;
m[-2000016] = 0.000000e+000;
m[3000111] = 0.000000e+000;
m[3000113] = 0.000000e+000;
m[3000211] = 0.000000e+000;
m[-3000211] = 0.000000e+000;
m[3000213] = 0.000000e+000;
m[-3000213] = 0.000000e+000;
m[3000221] = 0.000000e+000;
m[3000223] = 0.000000e+000;
m[3000331] = 0.000000e+000;
m[3100021] = 0.000000e+000;
m[3100111] = 0.000000e+000;
m[3100113] = 0.000000e+000;
m[3200111] = 0.000000e+000;
m[3200113] = 0.000000e+000;
m[3300113] = 0.000000e+000;
m[3400113] = 0.000000e+000;
m[4000001] = 0.000000e+000;
m[-4000001] = 0.000000e+000;
m[4000002] = 0.000000e+000;
m[-4000002] = 0.000000e+000;
m[4000011] = 0.000000e+000;
m[-4000011] = 0.000000e+000;
m[4000012] = 0.000000e+000;
m[-4000012] = 0.000000e+000;
m[5000039] = 0.000000e+000;
m[9900012] = 0.000000e+000;
m[9900014] = 0.000000e+000;
m[9900016] = 0.000000e+000;
m[9900023] = 0.000000e+000;
m[9900024] = 0.000000e+000;
m[-9900024] = 0.000000e+000;
m[9900041] = 0.000000e+000;
m[-9900041] = 0.000000e+000;
m[9900042] = 0.000000e+000;
m[-9900042] = 0.000000e+000;
m[1027013000] = 0.000000e+000;
m[1012006000] = 0.000000e+000;
m[1063029000] = 0.000000e+000;
m[1014007000] = 0.000000e+000;
m[1016008000] = 0.000000e+000;
m[1028014000] = 0.000000e+000;
m[1065029000] = 0.000000e+000;
m[1009004000] = 0.000000e+000;
m[1019009000] = 0.000000e+000;
m[1056026000] = 0.000000e+000;
m[1207082000] = 0.000000e+000;
m[1208082000] = 0.000000e+000;
m[1029014000] = 0.000000e+000;
m[1206082000] = 0.000000e+000;
m[1054026000] = 0.000000e+000;
m[1018008000] = 0.000000e+000;
m[1030014000] = 0.000000e+000;
m[1057026000] = 0.000000e+000;
m[1204082000] = 0.000000e+000;
m[-99000000] = 0.000000e+000;
m[1028013000] = 0.000000e+000;
m[1040018000] = 0.000000e+000;
m[1011005000] = 0.000000e+000;
m[1012005000] = 0.000000e+000;
m[1013006000] = 0.000000e+000;
m[1014006000] = 0.000000e+000;
m[1052024000] = 0.000000e+000;
m[1024012000] = 0.000000e+000;
m[1026012000] = 0.000000e+000;
m[1027012000] = 0.000000e+000;
m[1015007000] = 0.000000e+000;
m[1022010000] = 0.000000e+000;
m[1058028000] = 0.000000e+000;
m[1060028000] = 0.000000e+000;
m[1062028000] = 0.000000e+000;
m[1064028000] = 0.000000e+000;
m[1007003000] = 0.000000e+000;
m[1025012000] = 0.000000e+000;
m[1053024000] = 0.000000e+000;
m[1055025000] = 0.000000e+000;
m[1008004000] = 0.000000e+000;
m[1010004000] = 0.000000e+000;
m[1010005000] = 0.000000e+000;
m[1016007000] = 0.000000e+000;
m[1017008000] = 0.000000e+000;
m[1019008000] = 0.000000e+000;
m[1023010000] = 0.000000e+000;
m[1024011000] = 0.000000e+000;
m[1031015000] = 0.000000e+000;
m[1039017000] = 0.000000e+000;
m[1040017000] = 0.000000e+000;
m[1036018000] = 0.000000e+000;
m[1050024000] = 0.000000e+000;
m[1054024000] = 0.000000e+000;
m[1059026000] = 0.000000e+000;
m[1061028000] = 0.000000e+000;
m[1063028000] = 0.000000e+000;
m[1092042000] = 0.000000e+000;
m[1095042000] = 0.000000e+000;
m[1096042000] = 0.000000e+000;
m[1097042000] = 0.000000e+000;
m[1098042000] = 0.000000e+000;
m[1100042000] = 0.000000e+000;
m[1108046000] = 0.000000e+000;
// Added by hand:
m[9902210] = 0.000000e+000; //diffractive p-state -> assume no lifetime
return true;
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_mult_total; // full kinematic range
Histo1DPtr _h_mult_eta[5]; // in eta bins
Histo1DPtr _h_mult_pt[5]; // in pT bins
Histo1DPtr _h_dndeta; // density dn/deta
Histo1DPtr _h_dndpt; // density dn/dpT
//@}
/// @name Private variables
double _p_min;
double _pt_min;
double _eta_min;
double _eta_max;
double _maxlft;
/// Count selected events
- double _sumW;
+ CounterPtr _sumW;
map<int, double> _partLftMap; // Map <PDGID, PDGLIFETIME>
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2014_I1281685);
}
diff --git a/analyses/pluginLHCb/LHCB_2015_I1333223.cc b/analyses/pluginLHCb/LHCB_2015_I1333223.cc
--- a/analyses/pluginLHCb/LHCB_2015_I1333223.cc
+++ b/analyses/pluginLHCb/LHCB_2015_I1333223.cc
@@ -1,113 +1,112 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Math/Units.hh"
#include <vector>
using namespace std;
namespace Rivet {
class LHCB_2015_I1333223 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2015_I1333223()
: Analysis("LHCB_2015_I1333223")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Charged particles
declare(ChargedFinalState(Cuts::eta> 2.0 && Cuts::eta <4.5 && Cuts::pT >0.2*GeV), "CFS");
// Reproducing only measurement for prompt charged particles
book(_hInelasticXs ,1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState &cfs = apply<ChargedFinalState> (event, "CFS");
// eliminate non-inelastic events and empty events in LHCb
if (cfs.particles().size() == 0) vetoEvent;
// See if this event has at least one prompt particle
foreach (const Particle &myp, cfs.particles()){
double dPV = getPVDCA(myp);
// if IP > 200 microns the particle is not considered prompt
if ((dPV < 0.) || (dPV > 0.2 * millimeter)) {
MSG_DEBUG(" Vetoing " << myp.pdgId() << " at " << dPV);
continue;
}
// histo gets filled only for inelastic events (at least one prompt charged particle)
- _hInelasticXs->fill(sqrtS(), weight);
+ _hInelasticXs->fill(sqrtS());
break;
} //end loop on particles
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_hInelasticXs, crossSection()/sumOfWeights()/millibarn);
}
//@}
private:
/*
* Compute Distance of Closest Approach in z range for one particle.
* Assuming length unit is mm.
* Returns -1. if unable to compute the DCA to PV.
*/
double getPVDCA(const Particle& p) {
const HepMC::GenVertex* vtx = p.genParticle()->production_vertex();
if ( 0 == vtx ) return -1.;
// Unit vector of particle's MOMENTUM three vector
const Vector3 u = p.momentum().p3().unit();
// The interaction point is always at (0, 0,0,0) hence the
// vector pointing from the PV to the particle production vertex is:
Vector3 d(vtx->position().x(), vtx->position().y(), vtx->position().z());
// Subtract projection of d onto u from d
double proj = d.dot(u);
d -= (u * proj);
// d should be orthogonal to u and it's length give the distance of
// closest approach
return d.mod();
}
/// @name Histograms
//@{
Histo1DPtr _hInelasticXs;
//@}
//
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2015_I1333223);
}
diff --git a/analyses/pluginMC/MC_WWINC.cc b/analyses/pluginMC/MC_WWINC.cc
--- a/analyses/pluginMC/MC_WWINC.cc
+++ b/analyses/pluginMC/MC_WWINC.cc
@@ -1,184 +1,183 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/WFinder.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
namespace Rivet {
/// @brief MC validation analysis for W^+[enu]W^-[munu] events
class MC_WWINC : public Analysis {
public:
/// Default constructor
MC_WWINC()
: Analysis("MC_WWINC")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
FinalState fs;
WFinder wenufinder(fs, Cuts::abseta < 3.5 && Cuts::pT > 25*GeV, PID::ELECTRON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2);
declare(wenufinder, "WenuFinder");
VetoedFinalState wmnuinput;
wmnuinput.addVetoOnThisFinalState(wenufinder);
WFinder wmnufinder(wmnuinput, Cuts::abseta < 3.5 && Cuts::pT > 25*GeV, PID::MUON, 60.0*GeV, 100.0*GeV, 25.0*GeV, 0.2);
declare(wmnufinder, "WmnuFinder");
// properties of the pair momentum
double sqrts = sqrtS()>0. ? sqrtS() : 14000.;
- book(_h_WW_pT ,"WW_pT", logspace(100, 1.0, 0.5*sqrts));
+ book(_h_WW_pT ,"WW_pT", logspace(100, 1.0, max(1.1,0.5*sqrts)));
book(_h_WW_pT_peak ,"WW_pT_peak", 25, 0.0, 25.0);
book(_h_WW_eta ,"WW_eta", 40, -7.0, 7.0);
book(_h_WW_phi ,"WW_phi", 25, 0.0, TWOPI);
book(_h_WW_m ,"WW_m", logspace(100, 150.0, 180.0+0.25*sqrts));
// correlations between the WW
book(_h_WW_dphi ,"WW_dphi", 25, 0.0, PI); /// @todo non-linear?
book(_h_WW_deta ,"WW_deta", 25, -7.0, 7.0);
book(_h_WW_dR ,"WW_dR", 25, 0.5, 7.0);
- book(_h_WW_dpT ,"WW_dpT", logspace(100, 1.0, 0.5*sqrts));
+ book(_h_WW_dpT ,"WW_dpT", logspace(100, 1.0, max(1.1,0.5*sqrts)));
book(_h_WW_costheta_planes ,"WW_costheta_planes", 25, -1.0, 1.0);
/// @todo fuer WW: missing ET
// properties of the W bosons
- book(_h_W_pT ,"W_pT", logspace(100, 10.0, 0.25*sqrts));
+ book(_h_W_pT ,"W_pT", logspace(100, 10.0, max(11.,0.25*sqrts)));
book(_h_W_eta ,"W_eta", 70, -7.0, 7.0);
// properties of the leptons
- book(_h_Wl_pT ,"Wl_pT", logspace(100, 30.0, 0.1
- *sqrts));
+ book(_h_Wl_pT ,"Wl_pT", logspace(100, 30.0, max(31., 0.1*sqrts)));
book(_h_Wl_eta ,"Wl_eta", 40, -3.5, 3.5);
// correlations between the opposite charge leptons
book(_h_WeWm_dphi ,"WeWm_dphi", 25, 0.0, PI);
book(_h_WeWm_deta ,"WeWm_deta", 25, -5.0, 5.0);
book(_h_WeWm_dR ,"WeWm_dR", 25, 0.5, 5.0);
book(_h_WeWm_m ,"WeWm_m", 100, 0.0, 300.0);
}
/// Do the analysis
void analyze(const Event & e) {
const double weight = 1.0;
const WFinder& wenufinder = apply<WFinder>(e, "WenuFinder");
if (wenufinder.bosons().size()!=1) {
vetoEvent;
}
const WFinder& wmnufinder = apply<WFinder>(e, "WmnuFinder");
if (wmnufinder.bosons().size()!=1) {
vetoEvent;
}
FourMomentum wenu(wenufinder.bosons()[0].momentum());
FourMomentum wmnu(wmnufinder.bosons()[0].momentum());
FourMomentum ww(wenu+wmnu);
// find leptons
FourMomentum ep=wenufinder.constituentLeptons()[0].momentum();
FourMomentum enu=wenufinder.constituentNeutrinos()[0].momentum();
FourMomentum mm=wmnufinder.constituentLeptons()[0].momentum();
FourMomentum mnu=wmnufinder.constituentNeutrinos()[0].momentum();
_h_WW_pT->fill(ww.pT(),weight);
_h_WW_pT_peak->fill(ww.pT(),weight);
_h_WW_eta->fill(ww.eta(),weight);
_h_WW_phi->fill(ww.phi(),weight);
double mww2=ww.mass2();
if (mww2>0.0) _h_WW_m->fill(sqrt(mww2), weight);
_h_WW_dphi->fill(mapAngle0ToPi(wenu.phi()-wmnu.phi()), weight);
_h_WW_deta->fill(wenu.eta()-wmnu.eta(), weight);
_h_WW_dR->fill(deltaR(wenu,wmnu), weight);
_h_WW_dpT->fill(fabs(wenu.pT()-wmnu.pT()), weight);
Vector3 crossWenu = ep.p3().cross(enu.p3());
Vector3 crossWmnu = mm.p3().cross(mnu.p3());
double costheta = crossWenu.dot(crossWmnu)/crossWenu.mod()/crossWmnu.mod();
_h_WW_costheta_planes->fill(costheta, weight);
_h_W_pT->fill(wenu.pT(),weight);
_h_W_pT->fill(wmnu.pT(),weight);
_h_W_eta->fill(wenu.eta(),weight);
_h_W_eta->fill(wmnu.eta(),weight);
_h_Wl_pT->fill(ep.pT(), weight);
_h_Wl_pT->fill(mm.pT(), weight);
_h_Wl_eta->fill(ep.eta(), weight);
_h_Wl_eta->fill(mm.eta(), weight);
_h_WeWm_dphi->fill(mapAngle0ToPi(ep.phi()-mm.phi()), weight);
_h_WeWm_deta->fill(ep.eta()-mm.eta(), weight);
_h_WeWm_dR->fill(deltaR(ep,mm), weight);
double m2=FourMomentum(ep+mm).mass2();
if (m2 < 0) m2 = 0.0;
_h_WeWm_m->fill(sqrt(m2), weight);
}
/// Finalize
void finalize() {
const double norm = crossSection()/picobarn/sumOfWeights();
scale(_h_WW_pT, norm);
scale(_h_WW_pT_peak, norm);
scale(_h_WW_eta, norm);
scale(_h_WW_phi, norm);
scale(_h_WW_m, norm);
scale(_h_WW_dphi, norm);
scale(_h_WW_deta, norm);
scale(_h_WW_dR, norm);
scale(_h_WW_dpT, norm);
scale(_h_WW_costheta_planes, norm);
scale(_h_W_pT, norm);
scale(_h_W_eta, norm);
scale(_h_Wl_pT, norm);
scale(_h_Wl_eta, norm);
scale(_h_WeWm_dphi, norm);
scale(_h_WeWm_deta, norm);
scale(_h_WeWm_dR, norm);
scale(_h_WeWm_m, norm);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_WW_pT;
Histo1DPtr _h_WW_pT_peak;
Histo1DPtr _h_WW_eta;
Histo1DPtr _h_WW_phi;
Histo1DPtr _h_WW_m;
Histo1DPtr _h_WW_dphi;
Histo1DPtr _h_WW_deta;
Histo1DPtr _h_WW_dR;
Histo1DPtr _h_WW_dpT;
Histo1DPtr _h_WW_costheta_planes;
Histo1DPtr _h_W_pT;
Histo1DPtr _h_W_eta;
Histo1DPtr _h_Wl_pT;
Histo1DPtr _h_Wl_eta;
Histo1DPtr _h_WeWm_dphi;
Histo1DPtr _h_WeWm_deta;
Histo1DPtr _h_WeWm_dR;
Histo1DPtr _h_WeWm_m;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(MC_WWINC);
}
diff --git a/analyses/pluginMisc/ARGUS_1993_S2653028.cc b/analyses/pluginMisc/ARGUS_1993_S2653028.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2653028.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2653028.cc
@@ -1,177 +1,175 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BELLE pi+/-, K+/- and proton/antiproton spectrum at Upsilon(4S)
/// @author Peter Richardson
class ARGUS_1993_S2653028 : public Analysis {
public:
ARGUS_1993_S2653028()
- : Analysis("ARGUS_1993_S2653028"),
- _weightSum(0.)
- { }
+ : Analysis("ARGUS_1993_S2653028"){ }
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.pid() == 300553) upsilons.push_back(p);
}
// Then in whole event if that failed
if (upsilons.empty()) {
foreach (const GenParticle* p, particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Find an upsilon
foreach (const Particle& p, upsilons) {
- _weightSum += weight;
+ _weightSum->fill();
vector<GenParticle *> pionsA,pionsB,protonsA,protonsB,kaons;
// Find the decay products we want
findDecayProducts(p.genParticle(), pionsA, pionsB, protonsA, protonsB, kaons);
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
for (size_t ix = 0; ix < pionsA.size(); ++ix) {
FourMomentum ptemp(pionsA[ix]->momentum());
FourMomentum p2 = cms_boost.transform(ptemp);
double pcm = cms_boost.transform(ptemp).vector3().mod();
- _histPiA->fill(pcm,weight);
+ _histPiA->fill(pcm);
}
- _multPiA->fill(10.58,double(pionsA.size())*weight);
+ _multPiA->fill(10.58,double(pionsA.size()));
for (size_t ix = 0; ix < pionsB.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(pionsB[ix]->momentum())).vector3().mod();
- _histPiB->fill(pcm,weight);
+ _histPiB->fill(pcm);
}
- _multPiB->fill(10.58,double(pionsB.size())*weight);
+ _multPiB->fill(10.58,double(pionsB.size()));
for (size_t ix = 0; ix < protonsA.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(protonsA[ix]->momentum())).vector3().mod();
- _histpA->fill(pcm,weight);
+ _histpA->fill(pcm);
}
- _multpA->fill(10.58,double(protonsA.size())*weight);
+ _multpA->fill(10.58,double(protonsA.size()));
for (size_t ix = 0; ix < protonsB.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(protonsB[ix]->momentum())).vector3().mod();
- _histpB->fill(pcm,weight);
+ _histpB->fill(pcm);
}
- _multpB->fill(10.58,double(protonsB.size())*weight);
+ _multpB->fill(10.58,double(protonsB.size()));
for (size_t ix = 0 ;ix < kaons.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(kaons[ix]->momentum())).vector3().mod();
- _histKA->fill(pcm,weight);
- _histKB->fill(pcm,weight);
+ _histKA->fill(pcm);
+ _histKB->fill(pcm);
}
- _multK->fill(10.58,double(kaons.size())*weight);
+ _multK->fill(10.58,double(kaons.size()));
}
}
void finalize() {
if (_weightSum > 0.) {
scale(_histPiA, 1./_weightSum);
scale(_histPiB, 1./_weightSum);
scale(_histKA , 1./_weightSum);
scale(_histKB , 1./_weightSum);
scale(_histpA , 1./_weightSum);
scale(_histpB , 1./_weightSum);
scale(_multPiA, 1./_weightSum);
scale(_multPiB, 1./_weightSum);
scale(_multK , 1./_weightSum);
scale(_multpA , 1./_weightSum);
scale(_multpB , 1./_weightSum);
}
}
void init() {
declare(UnstableFinalState(), "UFS");
// spectra
book(_histPiA ,1, 1, 1);
book(_histPiB ,2, 1, 1);
book(_histKA ,3, 1, 1);
book(_histKB ,6, 1, 1);
book(_histpA ,4, 1, 1);
book(_histpB ,5, 1, 1);
// multiplicities
book(_multPiA , 7, 1, 1);
book(_multPiB , 8, 1, 1);
book(_multK , 9, 1, 1);
book(_multpA ,10, 1, 1);
book(_multpB ,11, 1, 1);
+
+ book(_weightSum, "TMP/weightSum");
} // init
private:
//@{
/// Count of weights
- double _weightSum;
+ CounterPtr _weightSum;
/// Spectra
Histo1DPtr _histPiA, _histPiB, _histKA, _histKB, _histpA, _histpB;
/// Multiplicities
Histo1DPtr _multPiA, _multPiB, _multK, _multpA, _multpB;
//@}
void findDecayProducts(const GenParticle* p,
vector<GenParticle*>& pionsA, vector<GenParticle*>& pionsB,
vector<GenParticle*>& protonsA, vector<GenParticle*>& protonsB,
vector<GenParticle*>& kaons)
{
int parentId = p->pdg_id();
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = abs((*pp)->pdg_id());
if (id == PID::PIPLUS) {
if (parentId != PID::LAMBDA && parentId != PID::K0S) {
pionsA.push_back(*pp);
pionsB.push_back(*pp);
}
else
pionsB.push_back(*pp);
}
else if (id == PID::PROTON) {
if (parentId != PID::LAMBDA && parentId != PID::K0S) {
protonsA.push_back(*pp);
protonsB.push_back(*pp);
}
else
protonsB.push_back(*pp);
}
else if (id == PID::KPLUS) {
kaons.push_back(*pp);
}
else if ((*pp)->end_vertex())
findDecayProducts(*pp, pionsA, pionsB, protonsA, protonsB, kaons);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2653028);
}
diff --git a/analyses/pluginMisc/ARGUS_1993_S2669951.cc b/analyses/pluginMisc/ARGUS_1993_S2669951.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2669951.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2669951.cc
@@ -1,199 +1,199 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Production of the $\eta'(958)$ and $f_0(980)$ in $e^+e^-$ annihilation in the Upsilon region
/// @author Peter Richardson
class ARGUS_1993_S2669951 : public Analysis {
public:
ARGUS_1993_S2669951()
: Analysis("ARGUS_1993_S2669951")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_weightSum_cont, "weightSum_cont");
book(_weightSum_Ups1, "weightSum_Ups1");
book(_weightSum_Ups2, "weightSum_Ups2");
for ( auto i : {0,1,2} ) {
if ( i < 2 )
book(_count_etaPrime_highZ[i], "count_etaPrime_highz_" + to_str(i));
book(_count_etaPrime_allZ[i], "count_etaPrime_allz_" + to_str(i));
book(_count_f0[i], "count_f0_" + to_str(i));
}
book(_hist_cont_f0 ,2, 1, 1);
book(_hist_Ups1_f0 ,3, 1, 1);
book(_hist_Ups2_f0 ,4, 1, 1);
+
+ book(s111, 1, 1, 1, true);
+ book(s112, 1, 1, 2, true);
+ book(s511, 5, 1, 1, true);
+
}
void analyze(const Event& e) {
// Find the Upsilons among the unstables
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
Particles upsilons;
// First in unstable final state
foreach (const Particle& p, ufs.particles())
if (p.pid() == 553 || p.pid() == 100553)
upsilons.push_back(p);
// Then in whole event if fails
if (upsilons.empty()) {
/// @todo Replace HepMC digging with Particle::descendents etc. calls
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if ( p->pdg_id() != 553 && p->pdg_id() != 100553 ) continue;
// Discard it if its parent has the same PDG ID code (avoid duplicates)
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Finding done, now fill counters
if (upsilons.empty()) { // Continuum
MSG_DEBUG("No Upsilons found => continuum event");
_weightSum_cont->fill();
unsigned int nEtaA(0), nEtaB(0), nf0(0);
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
const double xp = 2.*p.E()/sqrtS();
const double beta = p.p3().mod() / p.E();
if (id == 9010221) {
_hist_cont_f0->fill(xp, 1.0/beta);
nf0 += 1;
} else if (id == 331) {
if (xp > 0.35) nEtaA += 1;
nEtaB += 1;
}
}
_count_f0[2] ->fill(nf0);
_count_etaPrime_highZ[1]->fill(nEtaA);
_count_etaPrime_allZ[2] ->fill(nEtaB);
} else { // Upsilon(s) found
MSG_DEBUG("Upsilons found => resonance event");
foreach (const Particle& ups, upsilons) {
const int parentId = ups.pid();
((parentId == 553) ? _weightSum_Ups1 : _weightSum_Ups2)->fill();
Particles unstable;
// Find the decay products we want
findDecayProducts(ups.genParticle(), unstable);
LorentzTransform cms_boost;
if (ups.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec());
const double mass = ups.mass();
unsigned int nEtaA(0), nEtaB(0), nf0(0);
foreach(const Particle& p, unstable) {
const int id = p.abspid();
const FourMomentum p2 = cms_boost.transform(p.momentum());
const double xp = 2.*p2.E()/mass;
const double beta = p2.p3().mod()/p2.E();
if (id == 9010221) { //< ?
((parentId == 553) ? _hist_Ups1_f0 : _hist_Ups2_f0)->fill(xp, 1.0/beta);
nf0 += 1;
} else if (id == 331) { //< ?
if (xp > 0.35) nEtaA += 1;
nEtaB += 1;
}
}
if (parentId == 553) {
_count_f0[0] ->fill( nf0);
_count_etaPrime_highZ[0]->fill(nEtaA);
_count_etaPrime_allZ[0] ->fill(nEtaB);
} else {
_count_f0[1]->fill(nf0);
_count_etaPrime_allZ[1] ->fill(nEtaB);
}
}
}
}
void finalize() {
// High-Z eta' multiplicity
- Scatter2DPtr s111;
- book(s111, 1, 1, 1, true);
if (_weightSum_Ups1 > 0) // Point at 9.460
s111->point(0).setY(_count_etaPrime_highZ[0] / _weightSum_Ups1, 0);
if (_weightSum_cont > 0) // Point at 9.905
s111->point(1).setY(_count_etaPrime_highZ[1] / _weightSum_cont, 0);
// All-Z eta' multiplicity
- Scatter2DPtr s112;
- book(s112, 1, 1, 2, true);
if (_weightSum_Ups1 > 0) // Point at 9.460
s112->point(0).setY(_count_etaPrime_allZ[0] / _weightSum_Ups1, 0);
if (_weightSum_cont > 0) // Point at 9.905
s112->point(1).setY(_count_etaPrime_allZ[2] / _weightSum_cont, 0);
if (_weightSum_Ups2 > 0) // Point at 10.02
s112->point(2).setY(_count_etaPrime_allZ[1] / _weightSum_Ups2, 0);
// f0 multiplicity
- Scatter2DPtr s511;
- book(s511, 5, 1, 1, true);
if (_weightSum_Ups1 > 0) // Point at 9.46
s511->point(0).setY(_count_f0[0] / _weightSum_Ups1, 0);
if (_weightSum_Ups2 > 0) // Point at 10.02
s511->point(1).setY(_count_f0[1] / _weightSum_Ups2, 0);
if (_weightSum_cont > 0) // Point at 10.45
s511->point(2).setY(_count_f0[2] / _weightSum_cont, 0);
// Scale histos
if (_weightSum_cont > 0.) scale(_hist_cont_f0, 1./_weightSum_cont);
if (_weightSum_Ups1 > 0.) scale(_hist_Ups1_f0, 1./_weightSum_Ups1);
if (_weightSum_Ups2 > 0.) scale(_hist_Ups2_f0, 1./_weightSum_Ups2);
}
private:
/// @name Counters
//@{
- vector<CounterPtr> _count_etaPrime_highZ, _count_etaPrime_allZ, _count_f0;
+ array<CounterPtr,3> _count_etaPrime_highZ, _count_etaPrime_allZ, _count_f0;
CounterPtr _weightSum_cont,_weightSum_Ups1,_weightSum_Ups2;
//@}
+ Scatter2DPtr s111, s112, s511;
/// Histos
Histo1DPtr _hist_cont_f0, _hist_Ups1_f0, _hist_Ups2_f0;
/// Recursively walk the HepMC tree to find decay products of @a p
void findDecayProducts(const GenParticle* p, Particles& unstable) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
const int id = abs((*pp)->pdg_id());
if (id == 331 || id == 9010221) unstable.push_back(Particle(*pp));
else if ((*pp)->end_vertex()) findDecayProducts(*pp, unstable);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2669951);
}
diff --git a/analyses/pluginMisc/ARGUS_1993_S2789213.cc b/analyses/pluginMisc/ARGUS_1993_S2789213.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2789213.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2789213.cc
@@ -1,256 +1,258 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief ARGUS vector meson production
/// @author Peter Richardson
class ARGUS_1993_S2789213 : public Analysis {
public:
ARGUS_1993_S2789213()
- : Analysis("ARGUS_1993_S2789213"),
- _weightSum_cont(0.),_weightSum_Ups1(0.),_weightSum_Ups4(0.)
+ : Analysis("ARGUS_1993_S2789213")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_mult_cont_Omega , 1, 1, 1);
book(_mult_cont_Rho0 , 1, 1, 2);
book(_mult_cont_KStar0 , 1, 1, 3);
book(_mult_cont_KStarPlus , 1, 1, 4);
book(_mult_cont_Phi , 1, 1, 5);
book(_mult_Ups1_Omega , 2, 1, 1);
book(_mult_Ups1_Rho0 , 2, 1, 2);
book(_mult_Ups1_KStar0 , 2, 1, 3);
book(_mult_Ups1_KStarPlus , 2, 1, 4);
book(_mult_Ups1_Phi , 2, 1, 5);
book(_mult_Ups4_Omega , 3, 1, 1);
book(_mult_Ups4_Rho0 , 3, 1, 2);
book(_mult_Ups4_KStar0 , 3, 1, 3);
book(_mult_Ups4_KStarPlus , 3, 1, 4);
book(_mult_Ups4_Phi , 3, 1, 5);
book(_hist_cont_KStarPlus , 4, 1, 1);
book(_hist_Ups1_KStarPlus , 5, 1, 1);
book(_hist_Ups4_KStarPlus , 6, 1, 1);
book(_hist_cont_KStar0 , 7, 1, 1);
book(_hist_Ups1_KStar0 , 8, 1, 1);
book(_hist_Ups4_KStar0 , 9, 1, 1);
book(_hist_cont_Rho0 ,10, 1, 1);
book(_hist_Ups1_Rho0 ,11, 1, 1);
book(_hist_Ups4_Rho0 ,12, 1, 1);
book(_hist_cont_Omega ,13, 1, 1);
book(_hist_Ups1_Omega ,14, 1, 1);
+
+
+ book(_weightSum_cont,"TMP/weightSumcont");
+ book(_weightSum_Ups1,"TMP/weightSumUps1");
+ book(_weightSum_Ups4,"TMP/weightSumUps4");
}
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid() == 300553 || p.pid() == 553) upsilons.push_back(p);
// Then in whole event if that failed
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553 && p->pdg_id() != 553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
if (upsilons.empty()) { // continuum
- _weightSum_cont += weight;
+ _weightSum_cont->fill();
unsigned int nOmega(0), nRho0(0), nKStar0(0), nKStarPlus(0), nPhi(0);
foreach (const Particle& p, ufs.particles()) {
int id = p.abspid();
double xp = 2.*p.E()/sqrtS();
double beta = p.p3().mod()/p.E();
if (id == 113) {
- _hist_cont_Rho0->fill(xp, weight/beta);
+ _hist_cont_Rho0->fill(xp, 1.0/beta);
++nRho0;
}
else if (id == 313) {
- _hist_cont_KStar0->fill(xp, weight/beta);
+ _hist_cont_KStar0->fill(xp, 1.0/beta);
++nKStar0;
}
else if (id == 223) {
- _hist_cont_Omega->fill(xp, weight/beta);
+ _hist_cont_Omega->fill(xp, 1.0/beta);
++nOmega;
}
else if (id == 323) {
- _hist_cont_KStarPlus->fill(xp,weight/beta);
+ _hist_cont_KStarPlus->fill(xp,1.0/beta);
++nKStarPlus;
}
else if (id == 333) {
++nPhi;
}
}
/// @todo Replace with Counters and fill one-point Scatters at the end
- _mult_cont_Omega ->fill(10.45, weight*nOmega );
- _mult_cont_Rho0 ->fill(10.45, weight*nRho0 );
- _mult_cont_KStar0 ->fill(10.45, weight*nKStar0 );
- _mult_cont_KStarPlus->fill(10.45, weight*nKStarPlus);
- _mult_cont_Phi ->fill(10.45, weight*nPhi );
+ _mult_cont_Omega ->fill(10.45, nOmega );
+ _mult_cont_Rho0 ->fill(10.45, nRho0 );
+ _mult_cont_KStar0 ->fill(10.45, nKStar0 );
+ _mult_cont_KStarPlus->fill(10.45, nKStarPlus);
+ _mult_cont_Phi ->fill(10.45, nPhi );
} else { // found an upsilon
foreach (const Particle& ups, upsilons) {
const int parentId = ups.pid();
- (parentId == 553 ? _weightSum_Ups1 : _weightSum_Ups4) += weight;
+ (parentId == 553 ? _weightSum_Ups1 : _weightSum_Ups4)->fill();
Particles unstable;
// Find the decay products we want
findDecayProducts(ups.genParticle(),unstable);
/// @todo Update to new LT mk* functions
LorentzTransform cms_boost;
if (ups.p3().mod() > 0.001)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec());
double mass = ups.mass();
unsigned int nOmega(0),nRho0(0),nKStar0(0),nKStarPlus(0),nPhi(0);
foreach(const Particle & p , unstable) {
int id = p.abspid();
FourMomentum p2 = cms_boost.transform(p.momentum());
double xp = 2.*p2.E()/mass;
double beta = p2.p3().mod()/p2.E();
if (id == 113) {
- if (parentId == 553) _hist_Ups1_Rho0->fill(xp,weight/beta);
- else _hist_Ups4_Rho0->fill(xp,weight/beta);
+ if (parentId == 553) _hist_Ups1_Rho0->fill(xp,1.0/beta);
+ else _hist_Ups4_Rho0->fill(xp,1.0/beta);
++nRho0;
}
else if (id == 313) {
- if (parentId == 553) _hist_Ups1_KStar0->fill(xp,weight/beta);
- else _hist_Ups4_KStar0->fill(xp,weight/beta);
+ if (parentId == 553) _hist_Ups1_KStar0->fill(xp,1.0/beta);
+ else _hist_Ups4_KStar0->fill(xp,1.0/beta);
++nKStar0;
}
else if (id == 223) {
- if (parentId == 553) _hist_Ups1_Omega->fill(xp,weight/beta);
+ if (parentId == 553) _hist_Ups1_Omega->fill(xp,1.0/beta);
++nOmega;
}
else if (id == 323) {
- if (parentId == 553) _hist_Ups1_KStarPlus->fill(xp,weight/beta);
- else _hist_Ups4_KStarPlus->fill(xp,weight/beta);
+ if (parentId == 553) _hist_Ups1_KStarPlus->fill(xp,1.0/beta);
+ else _hist_Ups4_KStarPlus->fill(xp,1.0/beta);
++nKStarPlus;
}
else if (id == 333) {
++nPhi;
}
}
if (parentId == 553) {
- _mult_Ups1_Omega ->fill(9.46,weight*nOmega );
- _mult_Ups1_Rho0 ->fill(9.46,weight*nRho0 );
- _mult_Ups1_KStar0 ->fill(9.46,weight*nKStar0 );
- _mult_Ups1_KStarPlus->fill(9.46,weight*nKStarPlus);
- _mult_Ups1_Phi ->fill(9.46,weight*nPhi );
+ _mult_Ups1_Omega ->fill(9.46,nOmega );
+ _mult_Ups1_Rho0 ->fill(9.46,nRho0 );
+ _mult_Ups1_KStar0 ->fill(9.46,nKStar0 );
+ _mult_Ups1_KStarPlus->fill(9.46,nKStarPlus);
+ _mult_Ups1_Phi ->fill(9.46,nPhi );
}
else {
- _mult_Ups4_Omega ->fill(10.58,weight*nOmega );
- _mult_Ups4_Rho0 ->fill(10.58,weight*nRho0 );
- _mult_Ups4_KStar0 ->fill(10.58,weight*nKStar0 );
- _mult_Ups4_KStarPlus->fill(10.58,weight*nKStarPlus);
- _mult_Ups4_Phi ->fill(10.58,weight*nPhi );
+ _mult_Ups4_Omega ->fill(10.58,nOmega );
+ _mult_Ups4_Rho0 ->fill(10.58,nRho0 );
+ _mult_Ups4_KStar0 ->fill(10.58,nKStar0 );
+ _mult_Ups4_KStarPlus->fill(10.58,nKStarPlus);
+ _mult_Ups4_Phi ->fill(10.58,nPhi );
}
}
}
}
void finalize() {
if (_weightSum_cont > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
scale(_mult_cont_Omega , 1./_weightSum_cont);
scale(_mult_cont_Rho0 , 1./_weightSum_cont);
scale(_mult_cont_KStar0 , 1./_weightSum_cont);
scale(_mult_cont_KStarPlus, 1./_weightSum_cont);
scale(_mult_cont_Phi , 1./_weightSum_cont);
scale(_hist_cont_KStarPlus, 1./_weightSum_cont);
scale(_hist_cont_KStar0 , 1./_weightSum_cont);
scale(_hist_cont_Rho0 , 1./_weightSum_cont);
scale(_hist_cont_Omega , 1./_weightSum_cont);
}
if (_weightSum_Ups1 > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
scale(_mult_Ups1_Omega , 1./_weightSum_Ups1);
scale(_mult_Ups1_Rho0 , 1./_weightSum_Ups1);
scale(_mult_Ups1_KStar0 , 1./_weightSum_Ups1);
scale(_mult_Ups1_KStarPlus, 1./_weightSum_Ups1);
scale(_mult_Ups1_Phi , 1./_weightSum_Ups1);
scale(_hist_Ups1_KStarPlus, 1./_weightSum_Ups1);
scale(_hist_Ups1_KStar0 , 1./_weightSum_Ups1);
scale(_hist_Ups1_Rho0 , 1./_weightSum_Ups1);
scale(_hist_Ups1_Omega , 1./_weightSum_Ups1);
}
if (_weightSum_Ups4 > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
scale(_mult_Ups4_Omega , 1./_weightSum_Ups4);
scale(_mult_Ups4_Rho0 , 1./_weightSum_Ups4);
scale(_mult_Ups4_KStar0 , 1./_weightSum_Ups4);
scale(_mult_Ups4_KStarPlus, 1./_weightSum_Ups4);
scale(_mult_Ups4_Phi , 1./_weightSum_Ups4);
scale(_hist_Ups4_KStarPlus, 1./_weightSum_Ups4);
scale(_hist_Ups4_KStar0 , 1./_weightSum_Ups4);
scale(_hist_Ups4_Rho0 , 1./_weightSum_Ups4);
}
}
private:
//@{
Histo1DPtr _mult_cont_Omega, _mult_cont_Rho0, _mult_cont_KStar0, _mult_cont_KStarPlus, _mult_cont_Phi;
Histo1DPtr _mult_Ups1_Omega, _mult_Ups1_Rho0, _mult_Ups1_KStar0, _mult_Ups1_KStarPlus, _mult_Ups1_Phi;
Histo1DPtr _mult_Ups4_Omega, _mult_Ups4_Rho0, _mult_Ups4_KStar0, _mult_Ups4_KStarPlus, _mult_Ups4_Phi;
Histo1DPtr _hist_cont_KStarPlus, _hist_Ups1_KStarPlus, _hist_Ups4_KStarPlus;
Histo1DPtr _hist_cont_KStar0, _hist_Ups1_KStar0, _hist_Ups4_KStar0 ;
Histo1DPtr _hist_cont_Rho0, _hist_Ups1_Rho0, _hist_Ups4_Rho0;
Histo1DPtr _hist_cont_Omega, _hist_Ups1_Omega;
- double _weightSum_cont,_weightSum_Ups1,_weightSum_Ups4;
+ CounterPtr _weightSum_cont,_weightSum_Ups1,_weightSum_Ups4;
//@}
void findDecayProducts(const GenParticle* p, Particles& unstable) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = abs((*pp)->pdg_id());
if (id == 113 || id == 313 || id == 323 ||
id == 333 || id == 223 ) {
unstable.push_back(Particle(*pp));
}
else if ((*pp)->end_vertex())
findDecayProducts(*pp, unstable);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2789213);
}
diff --git a/analyses/pluginMisc/BABAR_2003_I593379.cc b/analyses/pluginMisc/BABAR_2003_I593379.cc
--- a/analyses/pluginMisc/BABAR_2003_I593379.cc
+++ b/analyses/pluginMisc/BABAR_2003_I593379.cc
@@ -1,186 +1,186 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Babar charmonium spectra
/// @author Peter Richardson
class BABAR_2003_I593379 : public Analysis {
public:
BABAR_2003_I593379()
- : Analysis("BABAR_2003_I593379"), _weightSum(0.)
+ : Analysis("BABAR_2003_I593379")
{ }
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Find the charmonia
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid() == 300553) upsilons.push_back(p);
// Then in whole event if fails
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Find upsilons
foreach (const Particle& p, upsilons) {
- _weightSum += weight;
+ _weightSum->fill();
// Find the charmonium resonances
/// @todo Use Rivet::Particles
vector<const GenParticle*> allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2;
findDecayProducts(p.genParticle(), allJpsi, primaryJpsi, Psiprime,
all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2);
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.mom().betaVec());
for (size_t i = 0; i < allJpsi.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(allJpsi[i]->momentum())).p();
- _hist_all_Jpsi->fill(pcm, weight);
+ _hist_all_Jpsi->fill(pcm);
}
- _mult_JPsi->fill(10.58, weight*double(allJpsi.size()));
+ _mult_JPsi->fill(10.58, double(allJpsi.size()));
for (size_t i = 0; i < primaryJpsi.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(primaryJpsi[i]->momentum())).p();
- _hist_primary_Jpsi->fill(pcm, weight);
+ _hist_primary_Jpsi->fill(pcm);
}
- _mult_JPsi_direct->fill(10.58, weight*double(primaryJpsi.size()));
+ _mult_JPsi_direct->fill(10.58, double(primaryJpsi.size()));
for (size_t i=0; i<Psiprime.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(Psiprime[i]->momentum())).p();
- _hist_Psi_prime->fill(pcm, weight);
+ _hist_Psi_prime->fill(pcm);
}
- _mult_Psi2S->fill(10.58, weight*double(Psiprime.size()));
+ _mult_Psi2S->fill(10.58, double(Psiprime.size()));
for (size_t i = 0; i < all_chi_c1.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(all_chi_c1[i]->momentum())).p();
- _hist_chi_c1->fill(pcm, weight);
+ _hist_chi_c1->fill(pcm);
}
- _mult_chi_c1->fill(10.58, weight*double(all_chi_c1.size()));
- _mult_chi_c1_direct->fill(10.58, weight*double(primary_chi_c1.size()));
+ _mult_chi_c1->fill(10.58, double(all_chi_c1.size()));
+ _mult_chi_c1_direct->fill(10.58, double(primary_chi_c1.size()));
for (size_t i = 0; i < all_chi_c2.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(all_chi_c2[i]->momentum())).p();
- _hist_chi_c2->fill(pcm, weight);
+ _hist_chi_c2->fill(pcm);
}
- _mult_chi_c2->fill(10.58, weight*double(all_chi_c2.size()));
- _mult_chi_c2_direct->fill(10.58, weight*double(primary_chi_c2.size()));
+ _mult_chi_c2->fill(10.58, double(all_chi_c2.size()));
+ _mult_chi_c2_direct->fill(10.58, double(primary_chi_c2.size()));
}
} // analyze
void finalize() {
scale(_hist_all_Jpsi , 0.5*0.1/_weightSum);
scale(_hist_chi_c1 , 0.5*0.1/_weightSum);
scale(_hist_chi_c2 , 0.5*0.1/_weightSum);
scale(_hist_Psi_prime , 0.5*0.1/_weightSum);
scale(_hist_primary_Jpsi , 0.5*0.1/_weightSum);
scale(_mult_JPsi , 0.5*100./_weightSum);
scale(_mult_JPsi_direct , 0.5*100./_weightSum);
scale(_mult_chi_c1 , 0.5*100./_weightSum);
scale(_mult_chi_c1_direct, 0.5*100./_weightSum);
scale(_mult_chi_c2 , 0.5*100./_weightSum);
scale(_mult_chi_c2_direct, 0.5*100./_weightSum);
scale(_mult_Psi2S , 0.5*100./_weightSum);
} // finalize
void init() {
declare(UnstableFinalState(), "UFS");
book(_mult_JPsi ,1, 1, 1);
book(_mult_JPsi_direct ,1, 1, 2);
book(_mult_chi_c1 ,1, 1, 3);
book(_mult_chi_c1_direct ,1, 1, 4);
book(_mult_chi_c2 ,1, 1, 5);
book(_mult_chi_c2_direct ,1, 1, 6);
book(_mult_Psi2S ,1, 1, 7);
book(_hist_all_Jpsi ,6, 1, 1);
book(_hist_chi_c1 ,7, 1, 1);
book(_hist_chi_c2 ,7, 1, 2);
book(_hist_Psi_prime ,8, 1, 1);
book(_hist_primary_Jpsi ,10, 1, 1);
+
+ book(_weightSum, "TMP/weightSum");
} // init
private:
//@{
// count of weights
- double _weightSum;
+ CounterPtr _weightSum;
/// Histograms
Histo1DPtr _hist_all_Jpsi;
Histo1DPtr _hist_chi_c1;
Histo1DPtr _hist_chi_c2;
Histo1DPtr _hist_Psi_prime;
Histo1DPtr _hist_primary_Jpsi;
Histo1DPtr _mult_JPsi;
Histo1DPtr _mult_JPsi_direct;
Histo1DPtr _mult_chi_c1;
Histo1DPtr _mult_chi_c1_direct;
Histo1DPtr _mult_chi_c2;
Histo1DPtr _mult_chi_c2_direct;
Histo1DPtr _mult_Psi2S;
//@}
void findDecayProducts(const GenParticle* p,
vector<const GenParticle*>& allJpsi,
vector<const GenParticle*>& primaryJpsi,
vector<const GenParticle*>& Psiprime,
vector<const GenParticle*>& all_chi_c1, vector<const GenParticle*>& all_chi_c2,
vector<const GenParticle*>& primary_chi_c1, vector<const GenParticle*>& primary_chi_c2) {
const GenVertex* dv = p->end_vertex();
bool isOnium = false;
/// @todo Use better looping
for (GenVertex::particles_in_const_iterator pp = dv->particles_in_const_begin() ; pp != dv->particles_in_const_end() ; ++pp) {
int id = (*pp)->pdg_id();
id = id%1000;
id -= id%10;
id /= 10;
if (id==44) isOnium = true;
}
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id==100443) {
Psiprime.push_back(*pp);
}
else if (id==20443) {
all_chi_c1.push_back(*pp);
if (!isOnium) primary_chi_c1.push_back(*pp);
}
else if (id==445) {
all_chi_c2.push_back(*pp);
if (!isOnium) primary_chi_c2.push_back(*pp);
}
else if (id==443) {
allJpsi.push_back(*pp);
if (!isOnium) primaryJpsi.push_back(*pp);
}
if ((*pp)->end_vertex()) {
findDecayProducts(*pp, allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2);
}
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2003_I593379);
}
diff --git a/analyses/pluginMisc/BABAR_2005_S6181155.cc b/analyses/pluginMisc/BABAR_2005_S6181155.cc
--- a/analyses/pluginMisc/BABAR_2005_S6181155.cc
+++ b/analyses/pluginMisc/BABAR_2005_S6181155.cc
@@ -1,145 +1,143 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BABAR Xi_c baryons from fragmentation
/// @author Peter Richardson
class BABAR_2005_S6181155 : public Analysis {
public:
BABAR_2005_S6181155()
: Analysis("BABAR_2005_S6181155")
{ }
void init() {
declare(Beam(), "Beams");
declare(UnstableFinalState(), "UFS");
book(_histOnResonanceA ,1,1,1);
book(_histOnResonanceB ,2,1,1);
book(_histOffResonance ,2,1,2);
book(_sigma ,3,1,1);
book(_histOnResonanceA_norm ,4,1,1);
book(_histOnResonanceB_norm ,5,1,1);
book(_histOffResonance_norm ,5,1,2);
}
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Loop through unstable FS particles and look for charmed mesons/baryons
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
const Beam beamproj = apply<Beam>(e, "Beams");
const ParticlePair& beams = beamproj.beams();
const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
const double s = sqr(beamproj.sqrtS());
const bool onresonance = fuzzyEquals(beamproj.sqrtS()/GeV, 10.58, 2E-3);
foreach (const Particle& p, ufs.particles()) {
// 3-momentum in CMS frame
const double mom = cms_boost.transform(p.momentum()).vector3().mod();
// Only looking at Xi_c^0
if (p.abspid() != 4132 ) continue;
if (onresonance) {
- _histOnResonanceA_norm->fill(mom,weight);
- _histOnResonanceB_norm->fill(mom,weight);
+ _histOnResonanceA_norm->fill(mom);
+ _histOnResonanceB_norm->fill(mom);
}
else {
- _histOffResonance_norm->fill(mom,s/sqr(10.58)*weight);
+ _histOffResonance_norm->fill(mom,s/sqr(10.58));
}
MSG_DEBUG("mom = " << mom);
// off-resonance cross section
if (checkDecay(p.genParticle())) {
if (onresonance) {
- _histOnResonanceA->fill(mom,weight);
- _histOnResonanceB->fill(mom,weight);
+ _histOnResonanceA->fill(mom);
+ _histOnResonanceB->fill(mom);
}
else {
- _histOffResonance->fill(mom,s/sqr(10.58)*weight);
- _sigma->fill(10.6,weight);
+ _histOffResonance->fill(mom,s/sqr(10.58));
+ _sigma->fill(10.6);
}
}
}
}
void finalize() {
scale(_histOnResonanceA, crossSection()/femtobarn/sumOfWeights());
scale(_histOnResonanceB, crossSection()/femtobarn/sumOfWeights());
scale(_histOffResonance, crossSection()/femtobarn/sumOfWeights());
scale(_sigma , crossSection()/femtobarn/sumOfWeights());
normalize(_histOnResonanceA_norm);
normalize(_histOnResonanceB_norm);
normalize(_histOffResonance_norm);
}
private:
//@{
/// Histograms
Histo1DPtr _histOnResonanceA;
Histo1DPtr _histOnResonanceB;
Histo1DPtr _histOffResonance;
Histo1DPtr _sigma ;
Histo1DPtr _histOnResonanceA_norm;
Histo1DPtr _histOnResonanceB_norm;
Histo1DPtr _histOffResonance_norm;
//@}
bool checkDecay(const GenParticle* p) {
unsigned int nstable = 0, npip = 0, npim = 0;
unsigned int nXim = 0, nXip = 0;
findDecayProducts(p, nstable, npip, npim, nXip, nXim);
int id = p->pdg_id();
// Xi_c
if (id == 4132) {
if (nstable == 2 && nXim == 1 && npip == 1) return true;
}
else if (id == -4132) {
if (nstable == 2 && nXip == 1 && npim == 1) return true;
}
return false;
}
void findDecayProducts(const GenParticle* p,
unsigned int& nstable,
unsigned int& npip, unsigned int& npim,
unsigned int& nXip, unsigned int& nXim) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id==3312) {
++nXim;
++nstable;
} else if (id == -3312) {
++nXip;
++nstable;
} else if(id == 111 || id == 221) {
++nstable;
} else if ((*pp)->end_vertex()) {
findDecayProducts(*pp, nstable, npip, npim, nXip, nXim);
} else {
if (id != 22) ++nstable;
if (id == 211) ++npip;
else if(id == -211) ++npim;
}
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2005_S6181155);
}
diff --git a/analyses/pluginMisc/BABAR_2007_S6895344.cc b/analyses/pluginMisc/BABAR_2007_S6895344.cc
--- a/analyses/pluginMisc/BABAR_2007_S6895344.cc
+++ b/analyses/pluginMisc/BABAR_2007_S6895344.cc
@@ -1,86 +1,84 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BABAR Lambda_c from fragmentation
/// @author Peter Richardson
class BABAR_2007_S6895344 : public Analysis {
public:
BABAR_2007_S6895344()
: Analysis("BABAR_2007_S6895344")
{ }
void init() {
declare(Beam(), "Beams");
declare(UnstableFinalState(), "UFS");
book(_histOff ,1,1,1);
book(_sigmaOff ,2,1,1);
book(_histOn ,3,1,1);
book(_sigmaOn ,4,1,1);
}
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Loop through unstable FS particles and look for charmed mesons/baryons
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
const Beam beamproj = apply<Beam>(e, "Beams");
const ParticlePair& beams = beamproj.beams();
const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
const double s = sqr(beamproj.sqrtS());
const bool onresonance = fuzzyEquals(beamproj.sqrtS(), 10.58, 2E-3);
// Particle masses from PDGlive (accessed online 16. Nov. 2009).
foreach (const Particle& p, ufs.particles()) {
// Only looking at Lambda_c
if (p.abspid() != 4122) continue;
MSG_DEBUG("Lambda_c found");
const double mH2 = 5.22780; // 2.28646^2
const double mom = FourMomentum(cms_boost.transform(p.momentum())).p();
const double xp = mom/sqrt(s/4.0 - mH2);
if (onresonance) {
- _histOn ->fill(xp,weight);
- _sigmaOn ->fill(10.58, weight);
+ _histOn ->fill(xp);
+ _sigmaOn ->fill(10.58);
} else {
- _histOff ->fill(xp,weight);
- _sigmaOff->fill(10.54, weight);
+ _histOff ->fill(xp);
+ _sigmaOff->fill(10.54);
}
}
}
void finalize() {
scale(_sigmaOn , 1./sumOfWeights());
scale(_sigmaOff, 1./sumOfWeights());
scale(_histOn , 1./sumOfWeights());
scale(_histOff , 1./sumOfWeights());
}
private:
//@{
// Histograms for the continuum cross sections
Histo1DPtr _sigmaOn ;
Histo1DPtr _sigmaOff;
Histo1DPtr _histOn ;
Histo1DPtr _histOff ;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2007_S6895344);
}
diff --git a/analyses/pluginMisc/BABAR_2007_S7266081.cc b/analyses/pluginMisc/BABAR_2007_S7266081.cc
--- a/analyses/pluginMisc/BABAR_2007_S7266081.cc
+++ b/analyses/pluginMisc/BABAR_2007_S7266081.cc
@@ -1,190 +1,190 @@
// -*- C++ -*-
#include <iostream>
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BABAR tau lepton to three charged hadrons
/// @author Peter Richardson
class BABAR_2007_S7266081 : public Analysis {
public:
BABAR_2007_S7266081()
: Analysis("BABAR_2007_S7266081")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_hist_pipipi_pipipi , 1, 1, 1);
book(_hist_pipipi_pipi , 2, 1, 1);
book(_hist_Kpipi_Kpipi , 3, 1, 1);
book(_hist_Kpipi_Kpi , 4, 1, 1);
book(_hist_Kpipi_pipi , 5, 1, 1);
book(_hist_KpiK_KpiK , 6, 1, 1);
book(_hist_KpiK_KK , 7, 1, 1);
book(_hist_KpiK_piK , 8, 1, 1);
book(_hist_KKK_KKK , 9, 1, 1);
book(_hist_KKK_KK ,10, 1, 1);
book(_weight_total, "weight_total");
book(_weight_pipipi, "weight_pipipi");
book(_weight_Kpipi, "weight_Kpipi");
book(_weight_KpiK, "weight_KpiK");
book(_weight_KKK, "weight_KKK");
+ /// @note Using autobooking for these scatters since their x values are not really obtainable from the MC data
+ book(tmp11, 11, 1, 1, true);
+ book(tmp12, 12, 1, 1, true);
+ book(tmp13, 13, 1, 1, true);
+ book(tmp14, 14, 1, 1, true);
}
void analyze(const Event& e) {
// Find the taus
Particles taus;
foreach(const Particle& p, apply<UnstableFinalState>(e, "UFS").particles(Cuts::pid==PID::TAU)) {
_weight_total->fill();
Particles pip, pim, Kp, Km;
unsigned int nstable = 0;
// Get the boost to the rest frame
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
// Find the decay products we want
findDecayProducts(p.genParticle(), nstable, pip, pim, Kp, Km);
if (p.pid() < 0) {
swap(pip, pim);
swap(Kp, Km );
}
if (nstable != 4) continue;
// pipipi
if (pim.size() == 2 && pip.size() == 1) {
_weight_pipipi->fill();
_hist_pipipi_pipipi->
fill((pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass());
_hist_pipipi_pipi->
fill((pip[0].momentum()+pim[0].momentum()).mass());
_hist_pipipi_pipi->
fill((pip[0].momentum()+pim[1].momentum()).mass());
}
else if (pim.size() == 1 && pip.size() == 1 && Km.size() == 1) {
_weight_Kpipi->fill();
_hist_Kpipi_Kpipi->
fill((pim[0].momentum()+pip[0].momentum()+Km[0].momentum()).mass());
_hist_Kpipi_Kpi->
fill((pip[0].momentum()+Km[0].momentum()).mass());
_hist_Kpipi_pipi->
fill((pim[0].momentum()+pip[0].momentum()).mass());
}
else if (Kp.size() == 1 && Km.size() == 1 && pim.size() == 1) {
_weight_KpiK->fill();
_hist_KpiK_KpiK->
fill((Kp[0].momentum()+Km[0].momentum()+pim[0].momentum()).mass());
_hist_KpiK_KK->
fill((Kp[0].momentum()+Km[0].momentum()).mass());
_hist_KpiK_piK->
fill((Kp[0].momentum()+pim[0].momentum()).mass());
}
else if (Kp.size() == 1 && Km.size() == 2) {
_weight_KKK->fill();
_hist_KKK_KKK->
fill((Kp[0].momentum()+Km[0].momentum()+Km[1].momentum()).mass());
_hist_KKK_KK->
fill((Kp[0].momentum()+Km[0].momentum()).mass());
_hist_KKK_KK->
fill((Kp[0].momentum()+Km[1].momentum()).mass());
}
}
}
void finalize() {
if (_weight_pipipi > 0.) {
scale(_hist_pipipi_pipipi, 1.0/_weight_pipipi);
scale(_hist_pipipi_pipi , 0.5/_weight_pipipi);
}
if (_weight_Kpipi > 0.) {
scale(_hist_Kpipi_Kpipi , 1.0/_weight_Kpipi);
scale(_hist_Kpipi_Kpi , 1.0/_weight_Kpipi);
scale(_hist_Kpipi_pipi , 1.0/_weight_Kpipi);
}
if (_weight_KpiK > 0.) {
scale(_hist_KpiK_KpiK , 1.0/_weight_KpiK);
scale(_hist_KpiK_KK , 1.0/_weight_KpiK);
scale(_hist_KpiK_piK , 1.0/_weight_KpiK);
}
if (_weight_KKK > 0.) {
scale(_hist_KKK_KKK , 1.0/_weight_KKK);
scale(_hist_KKK_KK , 0.5/_weight_KKK);
}
- /// @note Using autobooking for these scatters since their x values are not really obtainable from the MC data
- Scatter2DPtr tmp11, tmp12, tmp13, tmp14;
- book(tmp11, 11, 1, 1, true);
- book(tmp12, 12, 1, 1, true);
- book(tmp13, 13, 1, 1, true);
- book(tmp14, 14, 1, 1, true);
tmp11->point(0).setY(100*_weight_pipipi/_weight_total, 100*sqrt(double(_weight_pipipi))/_weight_total);
tmp12->point(0).setY(100*_weight_Kpipi/_weight_total, 100*sqrt(double(_weight_Kpipi))/_weight_total);
tmp13->point(0).setY(100*_weight_KpiK/_weight_total, 100*sqrt(double(_weight_KpiK))/_weight_total);
tmp14->point(0).setY(100*_weight_KKK/_weight_total, 100*sqrt(double(_weight_KKK))/_weight_total);
}
private:
//@{
-
+ Scatter2DPtr tmp11, tmp12, tmp13, tmp14;
+
// Histograms
Histo1DPtr _hist_pipipi_pipipi, _hist_pipipi_pipi;
Histo1DPtr _hist_Kpipi_Kpipi, _hist_Kpipi_Kpi, _hist_Kpipi_pipi;
Histo1DPtr _hist_KpiK_KpiK, _hist_KpiK_KK, _hist_KpiK_piK;
Histo1DPtr _hist_KKK_KKK, _hist_KKK_KK;
// Weights counters
CounterPtr _weight_total, _weight_pipipi, _weight_Kpipi, _weight_KpiK, _weight_KKK;
//@}
void findDecayProducts(const GenParticle* p,
unsigned int & nstable,
Particles& pip, Particles& pim,
Particles& Kp, Particles& Km) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id == PID::PI0 )
++nstable;
else if (id == PID::K0S)
++nstable;
else if (id == PID::PIPLUS) {
pip.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::PIMINUS) {
pim.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KPLUS) {
Kp.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KMINUS) {
Km.push_back(Particle(**pp));
++nstable;
}
else if ((*pp)->end_vertex()) {
findDecayProducts(*pp, nstable, pip, pim, Kp, Km);
}
else
++nstable;
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2007_S7266081);
}
diff --git a/analyses/pluginMisc/BABAR_2013_I1238276.cc b/analyses/pluginMisc/BABAR_2013_I1238276.cc
--- a/analyses/pluginMisc/BABAR_2013_I1238276.cc
+++ b/analyses/pluginMisc/BABAR_2013_I1238276.cc
@@ -1,117 +1,115 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief BaBar pion, kaon and proton production in the continuum
/// @author Peter Richardson
class BABAR_2013_I1238276 : public Analysis {
public:
BABAR_2013_I1238276()
: Analysis("BABAR_2013_I1238276")
{ }
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
book(_histPion_no_dec ,1,1,1);
book(_histKaon_no_dec ,1,1,2);
book(_histProton_no_dec ,1,1,3);
book(_histPion_dec ,2,1,1);
book(_histKaon_dec ,2,1,2);
book(_histProton_dec ,2,1,3);
}
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Loop through charged FS particles and look for charmed mesons/baryons
const ChargedFinalState& fs = apply<ChargedFinalState>(e, "FS");
const Beam beamproj = apply<Beam>(e, "Beams");
const ParticlePair& beams = beamproj.beams();
const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
MSG_DEBUG("CMS Energy sqrt s = " << beamproj.sqrtS());
foreach (const Particle& p, fs.particles()) {
// check if prompt or not
const GenParticle* pmother = p.genParticle();
const GenVertex* ivertex = pmother->production_vertex();
bool prompt = true;
while (ivertex) {
int n_inparts = ivertex->particles_in_size();
if (n_inparts < 1) break;
pmother = particles(ivertex, HepMC::parents)[0]; // first mother particle
int mother_pid = abs(pmother->pdg_id());
if (mother_pid==PID::K0S || mother_pid==PID::LAMBDA) {
prompt = false;
break;
}
else if (mother_pid<6) {
break;
}
ivertex = pmother->production_vertex();
}
// momentum in CMS frame
const double mom = cms_boost.transform(p.momentum()).vector3().mod();
const int PdgId = p.abspid();
MSG_DEBUG("pdgID = " << PdgId << " Momentum = " << mom);
switch (PdgId) {
case PID::PIPLUS:
- if(prompt) _histPion_no_dec->fill(mom,weight);
- _histPion_dec ->fill(mom,weight);
+ if(prompt) _histPion_no_dec->fill(mom);
+ _histPion_dec ->fill(mom);
break;
case PID::KPLUS:
- if(prompt) _histKaon_no_dec->fill(mom,weight);
- _histKaon_dec ->fill(mom,weight);
+ if(prompt) _histKaon_no_dec->fill(mom);
+ _histKaon_dec ->fill(mom);
break;
case PID::PROTON:
- if(prompt) _histProton_no_dec->fill(mom,weight);
- _histProton_dec ->fill(mom,weight);
+ if(prompt) _histProton_no_dec->fill(mom);
+ _histProton_dec ->fill(mom);
default :
break;
}
}
}
void finalize() {
scale(_histPion_no_dec ,1./sumOfWeights());
scale(_histKaon_no_dec ,1./sumOfWeights());
scale(_histProton_no_dec,1./sumOfWeights());
scale(_histPion_dec ,1./sumOfWeights());
scale(_histKaon_dec ,1./sumOfWeights());
scale(_histProton_dec ,1./sumOfWeights());
}
private:
//@{
// Histograms for continuum data (sqrt(s) = 10.52 GeV)
// no K_S and Lambda decays
Histo1DPtr _histPion_no_dec;
Histo1DPtr _histKaon_no_dec;
Histo1DPtr _histProton_no_dec;
// including decays
Histo1DPtr _histPion_dec;
Histo1DPtr _histKaon_dec;
Histo1DPtr _histProton_dec;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2013_I1238276);
}
diff --git a/analyses/pluginMisc/BELLE_2001_S4598261.cc b/analyses/pluginMisc/BELLE_2001_S4598261.cc
--- a/analyses/pluginMisc/BELLE_2001_S4598261.cc
+++ b/analyses/pluginMisc/BELLE_2001_S4598261.cc
@@ -1,106 +1,105 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BELLE pi0 spectrum at Upsilon(4S)
/// @author Peter Richardson
class BELLE_2001_S4598261 : public Analysis {
public:
BELLE_2001_S4598261()
- : Analysis("BELLE_2001_S4598261"), _weightSum(0.)
+ : Analysis("BELLE_2001_S4598261")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_histdSigDp ,1, 1, 1); // spectrum
book(_histMult ,2, 1, 1); // multiplicity
+ book(_weightSum, "TMP/weightSum");
}
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid()==300553) upsilons.push_back(p);
// Then in whole event if fails
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
/// @todo Use better looping
for (GenVertex::particles_in_const_iterator pp = pv->particles_in_const_begin() ; pp != pv->particles_in_const_end() ; ++pp) {
if ( p->pdg_id() == (*pp)->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(p));
}
}
// Find upsilons
foreach (const Particle& p, upsilons) {
- _weightSum += weight;
+ _weightSum->fill();
// Find the neutral pions from the decay
vector<GenParticle *> pions;
findDecayProducts(p.genParticle(), pions);
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
for (size_t ix=0; ix<pions.size(); ++ix) {
const double pcm = cms_boost.transform(FourMomentum(pions[ix]->momentum())).p();
- _histdSigDp->fill(pcm,weight);
+ _histdSigDp->fill(pcm);
}
- _histMult->fill(0., pions.size()*weight);
+ _histMult->fill(0., pions.size());
}
}
void finalize() {
scale(_histdSigDp, 1./_weightSum);
scale(_histMult , 1./_weightSum);
}
private:
//@{
// count of weights
- double _weightSum;
+ CounterPtr _weightSum;
/// Histograms
Histo1DPtr _histdSigDp;
Histo1DPtr _histMult;
//@}
void findDecayProducts(const GenParticle* p, vector<GenParticle*>& pions) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
const int id = (*pp)->pdg_id();
if (id == 111) {
pions.push_back(*pp);
} else if ((*pp)->end_vertex())
findDecayProducts(*pp, pions);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BELLE_2001_S4598261);
}
diff --git a/analyses/pluginMisc/BELLE_2008_I786560.cc b/analyses/pluginMisc/BELLE_2008_I786560.cc
--- a/analyses/pluginMisc/BELLE_2008_I786560.cc
+++ b/analyses/pluginMisc/BELLE_2008_I786560.cc
@@ -1,112 +1,112 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BELLE tau lepton to pi pi
/// @author Peter Richardson
class BELLE_2008_I786560 : public Analysis {
public:
BELLE_2008_I786560()
- : Analysis("BELLE_2008_I786560"),
- _weight_total(0),
- _weight_pipi(0)
+ : Analysis("BELLE_2008_I786560")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_hist_pipi , 1, 1, 1);
+ book(_weight_total, "TMP/weight_total");
+ book(_weight_pipi, "TMP/weight_pipi");
}
void analyze(const Event& e) {
// Find the taus
Particles taus;
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.abspid() != PID::TAU) continue;
- _weight_total += 1.;
+ _weight_total->fill();
Particles pip, pim, pi0;
unsigned int nstable = 0;
// get the boost to the rest frame
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
// find the decay products we want
findDecayProducts(p.genParticle(), nstable, pip, pim, pi0);
if (p.pid() < 0) {
swap(pip, pim);
}
if (nstable != 3) continue;
// pipi
if (pim.size() == 1 && pi0.size() == 1) {
- _weight_pipi += 1.;
- _hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass2(),1.);
+ _weight_pipi->fill();
+ _hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass2());
}
}
}
void finalize() {
if (_weight_pipi > 0.) scale(_hist_pipi, 1./_weight_pipi);
}
private:
//@{
// Histograms
Histo1DPtr _hist_pipi;
// Weights counters
- double _weight_total, _weight_pipi;
+ CounterPtr _weight_total, _weight_pipi;
//@}
void findDecayProducts(const GenParticle* p,
unsigned int & nstable,
Particles& pip, Particles& pim,
Particles& pi0) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id == PID::PI0 ) {
pi0.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::K0S)
++nstable;
else if (id == PID::PIPLUS) {
pip.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::PIMINUS) {
pim.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KPLUS) {
++nstable;
}
else if (id == PID::KMINUS) {
++nstable;
}
else if ((*pp)->end_vertex()) {
findDecayProducts(*pp, nstable, pip, pim, pi0);
}
else
++nstable;
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BELLE_2008_I786560);
}
diff --git a/analyses/pluginMisc/BELLE_2013_I1216515.cc b/analyses/pluginMisc/BELLE_2013_I1216515.cc
--- a/analyses/pluginMisc/BELLE_2013_I1216515.cc
+++ b/analyses/pluginMisc/BELLE_2013_I1216515.cc
@@ -1,81 +1,79 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief BELLE pion and kaon continuum production
/// @author Peter Richardson
class BELLE_2013_I1216515 : public Analysis {
public:
BELLE_2013_I1216515()
: Analysis("BELLE_2013_I1216515")
{ }
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Loop through charged FS particles and look for charmed mesons/baryons
const ChargedFinalState& fs = apply<ChargedFinalState>(e, "FS");
const Beam beamproj = apply<Beam>(e, "Beams");
const ParticlePair& beams = beamproj.beams();
const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
MSG_DEBUG("CMS energy sqrt s = " << beamproj.sqrtS());
foreach (const Particle& p, fs.particles()) {
// energy in CMS frame
const double en = cms_boost.transform(p.momentum()).t();
const double z = 2.*en/beamproj.sqrtS();
const int PdgId = p.abspid();
MSG_DEBUG("pdgID = " << PdgId << " Energy = " << en);
switch (PdgId) {
case PID::PIPLUS:
- _histPion->fill(z,weight);
+ _histPion->fill(z);
break;
case PID::KPLUS:
- _histKaon->fill(z,weight);
+ _histKaon->fill(z);
break;
default :
break;
}
}
} // analyze
void finalize() {
scale(_histPion,crossSection()/femtobarn/sumOfWeights());
scale(_histKaon,crossSection()/femtobarn/sumOfWeights());
} // finalize
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
book(_histPion ,1,1,1);
book(_histKaon ,1,1,2);
} // init
private:
//@{
// Histograms for continuum data (sqrt(s) = 10.52 GeV)
Histo1DPtr _histPion;
Histo1DPtr _histKaon;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BELLE_2013_I1216515);
}
diff --git a/analyses/pluginMisc/CLEO_2004_S5809304.cc b/analyses/pluginMisc/CLEO_2004_S5809304.cc
--- a/analyses/pluginMisc/CLEO_2004_S5809304.cc
+++ b/analyses/pluginMisc/CLEO_2004_S5809304.cc
@@ -1,164 +1,162 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief CLEO charmed mesons and baryons from fragmentation
/// @author Peter Richardson
class CLEO_2004_S5809304 : public Analysis {
public:
CLEO_2004_S5809304()
: Analysis("CLEO_2004_S5809304")
{ }
void analyze(const Event& e) {
- const double weight = 1.0;
-
// Loop through unstable FS particles and look for charmed mesons/baryons
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
const Beam beamproj = apply<Beam>(e, "Beams");
const ParticlePair& beams = beamproj.beams();
const FourMomentum mom_tot = beams.first.momentum() + beams.second.momentum();
LorentzTransform cms_boost;
if (mom_tot.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(mom_tot.betaVec());
const double s = sqr(beamproj.sqrtS());
// Particle masses from PDGlive (accessed online 16. Nov. 2009).
foreach (const Particle& p, ufs.particles()) {
double xp = 0.0;
double mH2 = 0.0;
// 3-momentum in CMS frame
const double mom = cms_boost.transform(p.momentum()).vector3().mod();
const int PdgId = p.abspid();
MSG_DEBUG("pdgID = " << PdgId << " mom = " << mom);
switch (PdgId) {
case 421:
MSG_DEBUG("D0 found");
mH2 = 3.47763; // 1.86484^2
xp = mom/sqrt(s/4.0 - mH2);
- _sigmaD0A->fill(10.6,weight);
- _sigmaD0B->fill(10.6,weight);
- _histXpD0A->fill(xp, weight);
- _histXpD0B->fill(xp, weight);
- _histXpTotal->fill(xp, weight);
+ _sigmaD0A->fill(10.6);
+ _sigmaD0B->fill(10.6);
+ _histXpD0A->fill(xp);
+ _histXpD0B->fill(xp);
+ _histXpTotal->fill(xp);
break;
case 411:
MSG_DEBUG("D+ found");
mH2 = 3.49547; // 1.86962^2
xp = mom/sqrt(s/4.0 - mH2);
- _sigmaDPlus->fill(10.6,weight);
- _histXpDplus->fill(xp, weight);
- _histXpTotal->fill(xp, weight);
+ _sigmaDPlus->fill(10.6);
+ _histXpDplus->fill(xp);
+ _histXpTotal->fill(xp);
break;
case 413:
MSG_DEBUG("D*+ found");
mH2 = 4.04119; // 2.01027^2
xp = mom/sqrt(s/4.0 - mH2);
- _sigmaDStarPlusA->fill(10.6,weight);
- _sigmaDStarPlusB->fill(10.6,weight);
- _histXpDStarPlusA->fill(xp, weight);
- _histXpDStarPlusB->fill(xp, weight);
- _histXpTotal->fill(xp, weight);
+ _sigmaDStarPlusA->fill(10.6);
+ _sigmaDStarPlusB->fill(10.6);
+ _histXpDStarPlusA->fill(xp);
+ _histXpDStarPlusB->fill(xp);
+ _histXpTotal->fill(xp);
break;
case 423:
MSG_DEBUG("D*0 found");
mH2 = 4.02793; // 2.00697**2
xp = mom/sqrt(s/4.0 - mH2);
- _sigmaDStar0A->fill(10.6,weight);
- _sigmaDStar0B->fill(10.6,weight);
- _histXpDStar0A->fill(xp, weight);
- _histXpDStar0B->fill(xp, weight);
- _histXpTotal->fill(xp, weight);
+ _sigmaDStar0A->fill(10.6);
+ _sigmaDStar0B->fill(10.6);
+ _histXpDStar0A->fill(xp);
+ _histXpDStar0B->fill(xp);
+ _histXpTotal->fill(xp);
break;
}
}
} // analyze
void finalize() {
scale(_sigmaDPlus , crossSection()/picobarn/sumOfWeights());
scale(_sigmaD0A , crossSection()/picobarn/sumOfWeights());
scale(_sigmaD0B , crossSection()/picobarn/sumOfWeights());
scale(_sigmaDStarPlusA, crossSection()/picobarn/sumOfWeights());
scale(_sigmaDStarPlusB, crossSection()/picobarn/sumOfWeights());
scale(_sigmaDStar0A , crossSection()/picobarn/sumOfWeights());
scale(_sigmaDStar0B , crossSection()/picobarn/sumOfWeights());
scale(_histXpDplus , crossSection()/picobarn/sumOfWeights());
scale(_histXpD0A , crossSection()/picobarn/sumOfWeights());
scale(_histXpD0B , crossSection()/picobarn/sumOfWeights());
scale(_histXpDStarPlusA, crossSection()/picobarn/sumOfWeights());
scale(_histXpDStarPlusB, crossSection()/picobarn/sumOfWeights());
scale(_histXpDStar0A , crossSection()/picobarn/sumOfWeights());
scale(_histXpDStar0B , crossSection()/picobarn/sumOfWeights());
scale(_histXpTotal , crossSection()/picobarn/sumOfWeights()/4.);
} // finalize
void init() {
declare(Beam(), "Beams");
declare(UnstableFinalState(), "UFS");
// continuum cross sections
book(_sigmaDPlus ,1,1,1);
book(_sigmaD0A ,1,1,2);
book(_sigmaD0B ,1,1,3);
book(_sigmaDStarPlusA ,1,1,4);
book(_sigmaDStarPlusB ,1,1,5);
book(_sigmaDStar0A ,1,1,6);
book(_sigmaDStar0B ,1,1,7);
// histograms for continuum data
book(_histXpDplus ,2, 1, 1);
book(_histXpD0A ,3, 1, 1);
book(_histXpD0B ,4, 1, 1);
book(_histXpDStarPlusA ,5, 1, 1);
book(_histXpDStarPlusB ,6, 1, 1);
book(_histXpDStar0A ,7, 1, 1);
book(_histXpDStar0B ,8, 1, 1);
book(_histXpTotal ,9, 1, 1);
} // init
private:
//@{
// Histograms for the continuum cross sections
Histo1DPtr _sigmaDPlus ;
Histo1DPtr _sigmaD0A ;
Histo1DPtr _sigmaD0B ;
Histo1DPtr _sigmaDStarPlusA;
Histo1DPtr _sigmaDStarPlusB;
Histo1DPtr _sigmaDStar0A ;
Histo1DPtr _sigmaDStar0B ;
// histograms for continuum data
Histo1DPtr _histXpDplus ;
Histo1DPtr _histXpD0A ;
Histo1DPtr _histXpD0B ;
Histo1DPtr _histXpDStarPlusA;
Histo1DPtr _histXpDStarPlusB;
Histo1DPtr _histXpDStar0A ;
Histo1DPtr _histXpDStar0B ;
Histo1DPtr _histXpTotal ;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CLEO_2004_S5809304);
}
diff --git a/analyses/pluginMisc/E735_1998_S3905616.cc b/analyses/pluginMisc/E735_1998_S3905616.cc
--- a/analyses/pluginMisc/E735_1998_S3905616.cc
+++ b/analyses/pluginMisc/E735_1998_S3905616.cc
@@ -1,72 +1,71 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief E735 charged multiplicity in NSD-triggered events
class E735_1998_S3905616 : public Analysis {
public:
/// Constructor
- E735_1998_S3905616() : Analysis("E735_1998_S3905616") {
- _sumWTrig = 0;
- }
+ E735_1998_S3905616() : Analysis("E735_1998_S3905616") {}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(), "FS");
// Histo
book(_hist_multiplicity ,1, 1, 1);
+ book(_sumWTrig, "TMP/sumWtrig");
+
}
void analyze(const Event& event) {
const bool trigger = apply<TriggerUA5>(event, "Trigger").nsdDecision();
if (!trigger) vetoEvent;
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
const ChargedFinalState& fs = apply<ChargedFinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
- _hist_multiplicity->fill(numParticles, weight);
+ _hist_multiplicity->fill(numParticles);
}
void finalize() {
scale(_hist_multiplicity, 1/_sumWTrig);
}
//@}
private:
/// @name Weight counter
//@{
- double _sumWTrig;
+ CounterPtr _sumWTrig;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_multiplicity;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(E735_1998_S3905616);
}
diff --git a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc
--- a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc
+++ b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES.cc
@@ -1,770 +1,767 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Implementation of PDG hadron multiplicities
/// @author Hendrik Hoeth
class PDG_HADRON_MULTIPLICITIES : public Analysis {
public:
/// Constructor
PDG_HADRON_MULTIPLICITIES() : Analysis("PDG_HADRON_MULTIPLICITIES")
{
}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = 1.0;
-
MSG_DEBUG("sqrt(s) = " << sqrtS()/GeV << " GeV");
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight);
+ _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid());
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 223:
- _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight);
+ _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
- _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight);
+ _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 433:
- _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 443:
- _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight);
+ _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
- _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight);
+ _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
- _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight);
+ _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
- _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight);
+ _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 4222:
case 4112:
- _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid(), weight);
+ _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid());
break;
case 3124:
- _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight);
+ _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight);
+ _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid());
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
- _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight);
+ _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 325:
- _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid());
break;
case 315:
- _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight);
+ _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid());
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 511:
- _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight);
+ _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
break;
case 521:
- _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight);
- _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid(), weight);
+ _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
+ _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid());
break;
case 531:
- _histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid(), weight);
+ _histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 9000211:
- _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid(), weight);
+ _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 213:
- _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid(), weight);
+ _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid());
break;
case 223:
- _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight);
+ _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 433:
- _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 513:
case 523:
case 533:
- _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid(), weight);
+ _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid());
break;
case 443:
- _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight);
+ _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 100443:
- _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid(), weight);
+ _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid());
break;
case 553:
- _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid(), weight);
+ _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid());
break;
case 20223:
- _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid(), weight);
+ _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid());
break;
case 20333:
- _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid(), weight);
+ _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid());
break;
case 445:
- _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid(), weight);
+ _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 335:
- _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid(), weight);
+ _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid());
break;
case 315:
- _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 515:
case 525:
case 535:
- _histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid(), weight);
+ _histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid());
break;
case 10433:
case 20433:
- _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid(), weight);
+ _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid());
break;
case 435:
- _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid(), weight);
+ _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
- _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight);
+ _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3112:
- _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid(), weight);
- _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid());
+ _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3222:
- _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid(), weight);
- _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid());
+ _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
- _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight);
+ _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
- _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight);
+ _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 5122:
- _histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid(), weight);
+ _histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid());
break;
case 3124:
- _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight);
+ _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid(), weight);
+ _histMeanMultiPiPlus->fill(_histMeanMultiPiPlus->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
}
}
}
}
/// Book histograms and declare projections before the run.
/// Exactly one of the four sqrt(s) windows below is expected to match a
/// given run; each window books its own set of mean-multiplicity
/// histograms, with the (d, x, y) arguments addressing the corresponding
/// column of the PDG reference-data tables.
void init() {
// Charged final state: used in analyze() for the >= 2 charged-particle event cut.
declare(ChargedFinalState(), "FS");
// Unstable final state: source of the identified-hadron spectra counted in analyze().
declare(UnstableFinalState(), "UFS");
// --- sqrt(s) in [9.5, 10.5] GeV ---
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
book(_histMeanMultiPiPlus , 1, 1, 1);
book(_histMeanMultiPi0 , 2, 1, 1);
book(_histMeanMultiKPlus , 3, 1, 1);
book(_histMeanMultiK0 , 4, 1, 1);
book(_histMeanMultiEta , 5, 1, 1);
book(_histMeanMultiEtaPrime , 6, 1, 1);
book(_histMeanMultiDPlus , 7, 1, 1);
book(_histMeanMultiD0 , 8, 1, 1);
book(_histMeanMultiDPlus_s , 9, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 1);
book(_histMeanMultiKStar892Plus ,18, 1, 1);
book(_histMeanMultiKStar892_0 ,19, 1, 1);
book(_histMeanMultiPhi1020 ,20, 1, 1);
book(_histMeanMultiDStar2010Plus ,21, 1, 1);
book(_histMeanMultiDStar2007_0 ,22, 1, 1);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 1);
book(_histMeanMultiP ,38, 1, 1);
book(_histMeanMultiLambda ,39, 1, 1);
book(_histMeanMultiSigma0 ,40, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 1);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 1);
book(_histMeanMultiSigma1385Minus ,46, 1, 1);
book(_histMeanMultiSigma1385Plus ,47, 1, 1);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 1);
book(_histMeanMultiXi1530_0 ,49, 1, 1);
book(_histMeanMultiOmegaMinus ,50, 1, 1);
book(_histMeanMultiLambda_c_Plus ,51, 1, 1);
book(_histMeanMultiSigma_c_PlusPlus_0 ,53, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 1);
}
// --- sqrt(s) in [29, 35] GeV ---
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
book(_histMeanMultiPiPlus , 1, 1, 2);
book(_histMeanMultiPi0 , 2, 1, 2);
book(_histMeanMultiKPlus , 3, 1, 2);
book(_histMeanMultiK0 , 4, 1, 2);
book(_histMeanMultiEta , 5, 1, 2);
book(_histMeanMultiEtaPrime , 6, 1, 2);
book(_histMeanMultiDPlus , 7, 1, 2);
book(_histMeanMultiD0 , 8, 1, 2);
book(_histMeanMultiDPlus_s , 9, 1, 2);
book(_histMeanMultiF0_980 ,13, 1, 2);
book(_histMeanMultiRho770_0 ,15, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 2);
book(_histMeanMultiKStar892_0 ,19, 1, 2);
book(_histMeanMultiPhi1020 ,20, 1, 2);
book(_histMeanMultiDStar2010Plus ,21, 1, 2);
book(_histMeanMultiDStar2007_0 ,22, 1, 2);
book(_histMeanMultiF2_1270 ,31, 1, 2);
book(_histMeanMultiK2Star1430Plus ,33, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 1);
book(_histMeanMultiP ,38, 1, 2);
book(_histMeanMultiLambda ,39, 1, 2);
book(_histMeanMultiXiMinus ,44, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 2);
book(_histMeanMultiSigma1385Plus ,47, 1, 2);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 2);
book(_histMeanMultiLambda_c_Plus ,51, 1, 2);
}
// --- sqrt(s) in [89.5, 91.8] GeV (largest booking set) ---
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
book(_histMeanMultiPiPlus , 1, 1, 3);
book(_histMeanMultiPi0 , 2, 1, 3);
book(_histMeanMultiKPlus , 3, 1, 3);
book(_histMeanMultiK0 , 4, 1, 3);
book(_histMeanMultiEta , 5, 1, 3);
book(_histMeanMultiEtaPrime , 6, 1, 3);
book(_histMeanMultiDPlus , 7, 1, 3);
book(_histMeanMultiD0 , 8, 1, 3);
book(_histMeanMultiDPlus_s , 9, 1, 3);
book(_histMeanMultiBPlus_B0_d ,10, 1, 1);
book(_histMeanMultiBPlus_u ,11, 1, 1);
book(_histMeanMultiB0_s ,12, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 3);
book(_histMeanMultiA0_980Plus ,14, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 3);
book(_histMeanMultiRho770Plus ,16, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 3);
book(_histMeanMultiKStar892_0 ,19, 1, 3);
book(_histMeanMultiPhi1020 ,20, 1, 3);
book(_histMeanMultiDStar2010Plus ,21, 1, 3);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 2);
book(_histMeanMultiBStar ,24, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 2);
book(_histMeanMultiPsi2S ,26, 1, 1);
book(_histMeanMultiUpsilon1S ,27, 1, 1);
book(_histMeanMultiF1_1285 ,28, 1, 1);
book(_histMeanMultiF1_1420 ,29, 1, 1);
book(_histMeanMultiChi_c1_3510 ,30, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 3);
book(_histMeanMultiF2Prime1525 ,32, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 2);
book(_histMeanMultiBStarStar ,35, 1, 1);
book(_histMeanMultiDs1Plus ,36, 1, 1);
book(_histMeanMultiDs2Plus ,37, 1, 1);
book(_histMeanMultiP ,38, 1, 3);
book(_histMeanMultiLambda ,39, 1, 3);
book(_histMeanMultiSigma0 ,40, 1, 2);
book(_histMeanMultiSigmaMinus ,41, 1, 1);
book(_histMeanMultiSigmaPlus ,42, 1, 1);
book(_histMeanMultiSigmaPlusMinus ,43, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 3);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 3);
book(_histMeanMultiSigma1385Plus ,47, 1, 3);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 3);
book(_histMeanMultiXi1530_0 ,49, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 3);
book(_histMeanMultiLambda_c_Plus ,51, 1, 3);
book(_histMeanMultiLambda_b_0 ,52, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 2);
}
// --- sqrt(s) in [130, 200] GeV (only 5 species measured) ---
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
book(_histMeanMultiPiPlus , 1, 1, 4);
book(_histMeanMultiKPlus , 3, 1, 4);
book(_histMeanMultiK0 , 4, 1, 4);
book(_histMeanMultiP ,38, 1, 4);
book(_histMeanMultiLambda ,39, 1, 4);
}
}
// Finalize
void finalize() {
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
scale(_histMeanMultiPiPlus , 1.0/sumOfWeights());
scale(_histMeanMultiPi0 , 1.0/sumOfWeights());
scale(_histMeanMultiKPlus , 1.0/sumOfWeights());
scale(_histMeanMultiK0 , 1.0/sumOfWeights());
scale(_histMeanMultiEta , 1.0/sumOfWeights());
scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights());
scale(_histMeanMultiDPlus , 1.0/sumOfWeights());
scale(_histMeanMultiD0 , 1.0/sumOfWeights());
scale(_histMeanMultiDPlus_s , 1.0/sumOfWeights());
scale(_histMeanMultiF0_980 , 1.0/sumOfWeights());
scale(_histMeanMultiRho770_0 , 1.0/sumOfWeights());
scale(_histMeanMultiOmega782 , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights());
scale(_histMeanMultiPhi1020 , 1.0/sumOfWeights());
scale(_histMeanMultiDStar2010Plus , 1.0/sumOfWeights());
scale(_histMeanMultiDStar2007_0 , 1.0/sumOfWeights());
scale(_histMeanMultiDStar_s2112Plus , 1.0/sumOfWeights());
scale(_histMeanMultiJPsi1S , 1.0/sumOfWeights());
scale(_histMeanMultiF2_1270 , 1.0/sumOfWeights());
scale(_histMeanMultiP , 1.0/sumOfWeights());
scale(_histMeanMultiLambda , 1.0/sumOfWeights());
scale(_histMeanMultiSigma0 , 1.0/sumOfWeights());
scale(_histMeanMultiXiMinus , 1.0/sumOfWeights());
scale(_histMeanMultiDelta1232PlusPlus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385Minus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385Plus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385PlusMinus, 1.0/sumOfWeights());
scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights());
scale(_histMeanMultiOmegaMinus , 1.0/sumOfWeights());
scale(_histMeanMultiLambda_c_Plus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0/sumOfWeights());
scale(_histMeanMultiLambda1520 , 1.0/sumOfWeights());
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
scale(_histMeanMultiPiPlus , 5.0/sumOfWeights());
scale(_histMeanMultiPi0 , 5.0/sumOfWeights());
scale(_histMeanMultiKPlus , 5.0/sumOfWeights());
scale(_histMeanMultiK0 , 5.0/sumOfWeights());
scale(_histMeanMultiEta , 5.0/sumOfWeights());
scale(_histMeanMultiEtaPrime , 5.0/sumOfWeights());
scale(_histMeanMultiDPlus , 5.0/sumOfWeights());
scale(_histMeanMultiD0 , 5.0/sumOfWeights());
scale(_histMeanMultiDPlus_s , 5.0/sumOfWeights());
scale(_histMeanMultiF0_980 , 5.0/sumOfWeights());
scale(_histMeanMultiRho770_0 , 5.0/sumOfWeights());
scale(_histMeanMultiKStar892Plus , 5.0/sumOfWeights());
scale(_histMeanMultiKStar892_0 , 5.0/sumOfWeights());
scale(_histMeanMultiPhi1020 , 5.0/sumOfWeights());
scale(_histMeanMultiDStar2010Plus , 5.0/sumOfWeights());
scale(_histMeanMultiDStar2007_0 , 5.0/sumOfWeights());
scale(_histMeanMultiF2_1270 , 5.0/sumOfWeights());
scale(_histMeanMultiK2Star1430Plus , 5.0/sumOfWeights());
scale(_histMeanMultiK2Star1430_0 , 5.0/sumOfWeights());
scale(_histMeanMultiP , 5.0/sumOfWeights());
scale(_histMeanMultiLambda , 5.0/sumOfWeights());
scale(_histMeanMultiXiMinus , 5.0/sumOfWeights());
scale(_histMeanMultiSigma1385Minus , 5.0/sumOfWeights());
scale(_histMeanMultiSigma1385Plus , 5.0/sumOfWeights());
scale(_histMeanMultiSigma1385PlusMinus, 5.0/sumOfWeights());
scale(_histMeanMultiOmegaMinus , 5.0/sumOfWeights());
scale(_histMeanMultiLambda_c_Plus , 5.0/sumOfWeights());
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
scale(_histMeanMultiPiPlus , 1.0/sumOfWeights());
scale(_histMeanMultiPi0 , 1.0/sumOfWeights());
scale(_histMeanMultiKPlus , 1.0/sumOfWeights());
scale(_histMeanMultiK0 , 1.0/sumOfWeights());
scale(_histMeanMultiEta , 1.0/sumOfWeights());
scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights());
scale(_histMeanMultiDPlus , 1.0/sumOfWeights());
scale(_histMeanMultiD0 , 1.0/sumOfWeights());
scale(_histMeanMultiDPlus_s , 1.0/sumOfWeights());
scale(_histMeanMultiBPlus_B0_d , 1.0/sumOfWeights());
scale(_histMeanMultiBPlus_u , 1.0/sumOfWeights());
scale(_histMeanMultiB0_s , 1.0/sumOfWeights());
scale(_histMeanMultiF0_980 , 1.0/sumOfWeights());
scale(_histMeanMultiA0_980Plus , 1.0/sumOfWeights());
scale(_histMeanMultiRho770_0 , 1.0/sumOfWeights());
scale(_histMeanMultiRho770Plus , 1.0/sumOfWeights());
scale(_histMeanMultiOmega782 , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights());
scale(_histMeanMultiPhi1020 , 1.0/sumOfWeights());
scale(_histMeanMultiDStar2010Plus , 1.0/sumOfWeights());
scale(_histMeanMultiDStar_s2112Plus , 1.0/sumOfWeights());
scale(_histMeanMultiBStar , 1.0/sumOfWeights());
scale(_histMeanMultiJPsi1S , 1.0/sumOfWeights());
scale(_histMeanMultiPsi2S , 1.0/sumOfWeights());
scale(_histMeanMultiUpsilon1S , 1.0/sumOfWeights());
scale(_histMeanMultiF1_1285 , 1.0/sumOfWeights());
scale(_histMeanMultiF1_1420 , 1.0/sumOfWeights());
scale(_histMeanMultiChi_c1_3510 , 1.0/sumOfWeights());
scale(_histMeanMultiF2_1270 , 1.0/sumOfWeights());
scale(_histMeanMultiF2Prime1525 , 1.0/sumOfWeights());
scale(_histMeanMultiK2Star1430_0 , 1.0/sumOfWeights());
scale(_histMeanMultiBStarStar , 1.0/sumOfWeights());
scale(_histMeanMultiDs1Plus , 1.0/sumOfWeights());
scale(_histMeanMultiDs2Plus , 1.0/sumOfWeights());
scale(_histMeanMultiP , 1.0/sumOfWeights());
scale(_histMeanMultiLambda , 1.0/sumOfWeights());
scale(_histMeanMultiSigma0 , 1.0/sumOfWeights());
scale(_histMeanMultiSigmaMinus , 1.0/sumOfWeights());
scale(_histMeanMultiSigmaPlus , 1.0/sumOfWeights());
scale(_histMeanMultiSigmaPlusMinus , 1.0/sumOfWeights());
scale(_histMeanMultiXiMinus , 1.0/sumOfWeights());
scale(_histMeanMultiDelta1232PlusPlus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385Minus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385Plus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385PlusMinus, 1.0/sumOfWeights());
scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights());
scale(_histMeanMultiOmegaMinus , 1.0/sumOfWeights());
scale(_histMeanMultiLambda_c_Plus , 1.0/sumOfWeights());
scale(_histMeanMultiLambda_b_0 , 1.0/sumOfWeights());
scale(_histMeanMultiLambda1520 , 1.0/sumOfWeights());
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
scale(_histMeanMultiPiPlus , 70.0/sumOfWeights());
scale(_histMeanMultiKPlus , 70.0/sumOfWeights());
scale(_histMeanMultiK0 , 70.0/sumOfWeights());
scale(_histMeanMultiP , 70.0/sumOfWeights());
scale(_histMeanMultiLambda , 70.0/sumOfWeights());
}
}
//@}
private:
Histo1DPtr _histMeanMultiPiPlus;
Histo1DPtr _histMeanMultiPi0;
Histo1DPtr _histMeanMultiKPlus;
Histo1DPtr _histMeanMultiK0;
Histo1DPtr _histMeanMultiEta;
Histo1DPtr _histMeanMultiEtaPrime;
Histo1DPtr _histMeanMultiDPlus;
Histo1DPtr _histMeanMultiD0;
Histo1DPtr _histMeanMultiDPlus_s;
Histo1DPtr _histMeanMultiBPlus_B0_d;
Histo1DPtr _histMeanMultiBPlus_u;
Histo1DPtr _histMeanMultiB0_s;
Histo1DPtr _histMeanMultiF0_980;
Histo1DPtr _histMeanMultiA0_980Plus;
Histo1DPtr _histMeanMultiRho770_0;
Histo1DPtr _histMeanMultiRho770Plus;
Histo1DPtr _histMeanMultiOmega782;
Histo1DPtr _histMeanMultiKStar892Plus;
Histo1DPtr _histMeanMultiKStar892_0;
Histo1DPtr _histMeanMultiPhi1020;
Histo1DPtr _histMeanMultiDStar2010Plus;
Histo1DPtr _histMeanMultiDStar2007_0;
Histo1DPtr _histMeanMultiDStar_s2112Plus;
Histo1DPtr _histMeanMultiBStar;
Histo1DPtr _histMeanMultiJPsi1S;
Histo1DPtr _histMeanMultiPsi2S;
Histo1DPtr _histMeanMultiUpsilon1S;
Histo1DPtr _histMeanMultiF1_1285;
Histo1DPtr _histMeanMultiF1_1420;
Histo1DPtr _histMeanMultiChi_c1_3510;
Histo1DPtr _histMeanMultiF2_1270;
Histo1DPtr _histMeanMultiF2Prime1525;
Histo1DPtr _histMeanMultiK2Star1430Plus;
Histo1DPtr _histMeanMultiK2Star1430_0;
Histo1DPtr _histMeanMultiBStarStar;
Histo1DPtr _histMeanMultiDs1Plus;
Histo1DPtr _histMeanMultiDs2Plus;
Histo1DPtr _histMeanMultiP;
Histo1DPtr _histMeanMultiLambda;
Histo1DPtr _histMeanMultiSigma0;
Histo1DPtr _histMeanMultiSigmaMinus;
Histo1DPtr _histMeanMultiSigmaPlus;
Histo1DPtr _histMeanMultiSigmaPlusMinus;
Histo1DPtr _histMeanMultiXiMinus;
Histo1DPtr _histMeanMultiDelta1232PlusPlus;
Histo1DPtr _histMeanMultiSigma1385Minus;
Histo1DPtr _histMeanMultiSigma1385Plus;
Histo1DPtr _histMeanMultiSigma1385PlusMinus;
Histo1DPtr _histMeanMultiXi1530_0;
Histo1DPtr _histMeanMultiOmegaMinus;
Histo1DPtr _histMeanMultiLambda_c_Plus;
Histo1DPtr _histMeanMultiLambda_b_0;
Histo1DPtr _histMeanMultiSigma_c_PlusPlus_0;
Histo1DPtr _histMeanMultiLambda1520;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(PDG_HADRON_MULTIPLICITIES);
}
diff --git a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
--- a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
+++ b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
@@ -1,764 +1,762 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Implementation of PDG hadron multiplicities as ratios to \f$ \pi^\pm \f$ multiplicity
/// @author Holger Schulz
class PDG_HADRON_MULTIPLICITIES_RATIOS : public Analysis {
public:
/// Constructor
PDG_HADRON_MULTIPLICITIES_RATIOS() : Analysis("PDG_HADRON_MULTIPLICITIES_RATIOS")
- {
- _weightedTotalNumPiPlus = 0;
- }
+ {}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
// Get event weight for histo filling
- const double weight = 1.0;
-
MSG_DEBUG("sqrt(S) = " << sqrtS()/GeV << " GeV");
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _weightedTotalNumPiPlus += weight;
+ _weightedTotalNumPiPlus->fill();
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 223:
- _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight);
+ _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
- _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight);
+ _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 433:
- _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 443:
- _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight);
+ _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
- _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight);
+ _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
- _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight);
+ _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
- _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight);
+ _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 4222:
case 4112:
- _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid(), weight);
+ _histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid());
break;
case 3124:
- _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight);
+ _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _weightedTotalNumPiPlus += weight;
+ _weightedTotalNumPiPlus->fill();
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
- _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid(), weight);
+ _histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 325:
- _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid());
break;
case 315:
- _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _weightedTotalNumPiPlus += weight;
+ _weightedTotalNumPiPlus->fill();
break;
case 111:
- _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid(), weight);
+ _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
- _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid(), weight);
+ _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
- _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid(), weight);
+ _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
- _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid(), weight);
+ _histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
- _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid(), weight);
+ _histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
- _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid(), weight);
+ _histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 511:
- _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight);
+ _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
break;
case 521:
- _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid(), weight);
- _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid(), weight);
+ _histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
+ _histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid());
break;
case 531:
- _histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid(), weight);
+ _histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid());
break;
case 9010221:
- _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid(), weight);
+ _histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 9000211:
- _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid(), weight);
+ _histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid());
break;
case 113:
- _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid(), weight);
+ _histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 213:
- _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid(), weight);
+ _histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid());
break;
case 223:
- _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid(), weight);
+ _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
- _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid(), weight);
+ _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
- _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid(), weight);
+ _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
- _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid(), weight);
+ _histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
- _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 433:
- _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid(), weight);
+ _histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 513:
case 523:
case 533:
- _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid(), weight);
+ _histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid());
break;
case 443:
- _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid(), weight);
+ _histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 100443:
- _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid(), weight);
+ _histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid());
break;
case 553:
- _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid(), weight);
+ _histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid());
break;
case 20223:
- _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid(), weight);
+ _histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid());
break;
case 20333:
- _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid(), weight);
+ _histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid());
break;
case 445:
- _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid(), weight);
+ _histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid());
break;
case 225:
- _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid(), weight);
+ _histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 335:
- _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid(), weight);
+ _histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid());
break;
case 315:
- _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid(), weight);
+ _histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 515:
case 525:
case 535:
- _histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid(), weight);
+ _histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid());
break;
case 10433:
case 20433:
- _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid(), weight);
+ _histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid());
break;
case 435:
- _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid(), weight);
+ _histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
- _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid(), weight);
+ _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3112:
- _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid(), weight);
- _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid());
+ _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3222:
- _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid(), weight);
- _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid());
+ _histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3312:
- _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid(), weight);
+ _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
- _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid(), weight);
+ _histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
- _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
- _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid(), weight);
- _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid(), weight);
+ _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
+ _histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
- _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid(), weight);
+ _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
- _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid(), weight);
+ _histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
- _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid(), weight);
+ _histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 5122:
- _histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid(), weight);
+ _histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid());
break;
case 3124:
- _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid(), weight);
+ _histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
- _weightedTotalNumPiPlus += weight;
+ _weightedTotalNumPiPlus->fill();
break;
case 321:
- _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid(), weight);
+ _histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
- _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid(), weight);
+ _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 2212:
- _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid(), weight);
+ _histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
- _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid(), weight);
+ _histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
}
}
}
}
void init() {
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
+ book(_weightedTotalNumPiPlus, "TMP/PiPlus");
+
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
book(_histMeanMultiPi0 , 2, 1, 1);
book(_histMeanMultiKPlus , 3, 1, 1);
book(_histMeanMultiK0 , 4, 1, 1);
book(_histMeanMultiEta , 5, 1, 1);
book(_histMeanMultiEtaPrime , 6, 1, 1);
book(_histMeanMultiDPlus , 7, 1, 1);
book(_histMeanMultiD0 , 8, 1, 1);
book(_histMeanMultiDPlus_s , 9, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 1);
book(_histMeanMultiKStar892Plus ,18, 1, 1);
book(_histMeanMultiKStar892_0 ,19, 1, 1);
book(_histMeanMultiPhi1020 ,20, 1, 1);
book(_histMeanMultiDStar2010Plus ,21, 1, 1);
book(_histMeanMultiDStar2007_0 ,22, 1, 1);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 1);
book(_histMeanMultiP ,38, 1, 1);
book(_histMeanMultiLambda ,39, 1, 1);
book(_histMeanMultiSigma0 ,40, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 1);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 1);
book(_histMeanMultiSigma1385Minus ,46, 1, 1);
book(_histMeanMultiSigma1385Plus ,47, 1, 1);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 1);
book(_histMeanMultiXi1530_0 ,49, 1, 1);
book(_histMeanMultiOmegaMinus ,50, 1, 1);
book(_histMeanMultiLambda_c_Plus ,51, 1, 1);
book(_histMeanMultiSigma_c_PlusPlus_0 ,53, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 1);
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
book(_histMeanMultiPi0 , 2, 1, 2);
book(_histMeanMultiKPlus , 3, 1, 2);
book(_histMeanMultiK0 , 4, 1, 2);
book(_histMeanMultiEta , 5, 1, 2);
book(_histMeanMultiEtaPrime , 6, 1, 2);
book(_histMeanMultiDPlus , 7, 1, 2);
book(_histMeanMultiD0 , 8, 1, 2);
book(_histMeanMultiDPlus_s , 9, 1, 2);
book(_histMeanMultiF0_980 ,13, 1, 2);
book(_histMeanMultiRho770_0 ,15, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 2);
book(_histMeanMultiKStar892_0 ,19, 1, 2);
book(_histMeanMultiPhi1020 ,20, 1, 2);
book(_histMeanMultiDStar2010Plus ,21, 1, 2);
book(_histMeanMultiDStar2007_0 ,22, 1, 2);
book(_histMeanMultiF2_1270 ,31, 1, 2);
book(_histMeanMultiK2Star1430Plus ,33, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 1);
book(_histMeanMultiP ,38, 1, 2);
book(_histMeanMultiLambda ,39, 1, 2);
book(_histMeanMultiXiMinus ,44, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 2);
book(_histMeanMultiSigma1385Plus ,47, 1, 2);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 2);
book(_histMeanMultiLambda_c_Plus ,51, 1, 2);
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
book(_histMeanMultiPi0 , 2, 1, 3);
book(_histMeanMultiKPlus , 3, 1, 3);
book(_histMeanMultiK0 , 4, 1, 3);
book(_histMeanMultiEta , 5, 1, 3);
book(_histMeanMultiEtaPrime , 6, 1, 3);
book(_histMeanMultiDPlus , 7, 1, 3);
book(_histMeanMultiD0 , 8, 1, 3);
book(_histMeanMultiDPlus_s , 9, 1, 3);
book(_histMeanMultiBPlus_B0_d ,10, 1, 1);
book(_histMeanMultiBPlus_u ,11, 1, 1);
book(_histMeanMultiB0_s ,12, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 3);
book(_histMeanMultiA0_980Plus ,14, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 3);
book(_histMeanMultiRho770Plus ,16, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 3);
book(_histMeanMultiKStar892_0 ,19, 1, 3);
book(_histMeanMultiPhi1020 ,20, 1, 3);
book(_histMeanMultiDStar2010Plus ,21, 1, 3);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 2);
book(_histMeanMultiBStar ,24, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 2);
book(_histMeanMultiPsi2S ,26, 1, 1);
book(_histMeanMultiUpsilon1S ,27, 1, 1);
book(_histMeanMultiF1_1285 ,28, 1, 1);
book(_histMeanMultiF1_1420 ,29, 1, 1);
book(_histMeanMultiChi_c1_3510 ,30, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 3);
book(_histMeanMultiF2Prime1525 ,32, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 2);
book(_histMeanMultiBStarStar ,35, 1, 1);
book(_histMeanMultiDs1Plus ,36, 1, 1);
book(_histMeanMultiDs2Plus ,37, 1, 1);
book(_histMeanMultiP ,38, 1, 3);
book(_histMeanMultiLambda ,39, 1, 3);
book(_histMeanMultiSigma0 ,40, 1, 2);
book(_histMeanMultiSigmaMinus ,41, 1, 1);
book(_histMeanMultiSigmaPlus ,42, 1, 1);
book(_histMeanMultiSigmaPlusMinus ,43, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 3);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 3);
book(_histMeanMultiSigma1385Plus ,47, 1, 3);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 3);
book(_histMeanMultiXi1530_0 ,49, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 3);
book(_histMeanMultiLambda_c_Plus ,51, 1, 3);
book(_histMeanMultiLambda_b_0 ,52, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 2);
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
book(_histMeanMultiKPlus , 3, 1, 4);
book(_histMeanMultiK0 , 4, 1, 4);
book(_histMeanMultiP ,38, 1, 4);
book(_histMeanMultiLambda ,39, 1, 4);
}
}
// Finalize
/// Normalise the summed multiplicities: every histogram is divided by the
/// accumulated pi+ weight (_weightedTotalNumPiPlus), i.e. the published
/// quantities are multiplicity ratios relative to pi+ production.
/// Each sqrt(s) window below scales only the histograms booked for that
/// energy. The 5.0 and 70.0 prefactors presumably match the units/binning
/// of the corresponding reference tables — TODO confirm against HepData.
/// NOTE(review): _weightedTotalNumPiPlus is declared as a CounterPtr in the
/// member section; the plain `1.0/_weightedTotalNumPiPlus` divisions assume
/// an arithmetic conversion (or operator/) for CounterPtr — confirm this
/// compiles, or use _weightedTotalNumPiPlus->sumW() instead.
void finalize() {
  // ~10 GeV datasets
  if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
    scale(_histMeanMultiPi0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEta, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEtaPrime, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiD0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus_s, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF0_980, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiRho770_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiOmega782, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiPhi1020, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar2010Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar2007_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar_s2112Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiJPsi1S, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF2_1270, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiP, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiXiMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDelta1232PlusPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Minus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiXi1530_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiOmegaMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda_c_Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda1520, 1.0/_weightedTotalNumPiPlus);
  }
  // ~29-35 GeV datasets (scaled by 5.0 — presumably a units factor, see above)
  if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
    scale(_histMeanMultiPi0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKPlus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEta, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEtaPrime, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiD0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus_s, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF0_980, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiRho770_0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892Plus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892_0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiPhi1020, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar2010Plus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar2007_0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF2_1270, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK2Star1430Plus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK2Star1430_0, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiP, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiXiMinus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Minus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Plus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385PlusMinus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiOmegaMinus, 5.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda_c_Plus, 5.0/_weightedTotalNumPiPlus);
  }
  // Z-pole (~91.2 GeV) datasets — the largest set of measured species
  if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
    scale(_histMeanMultiPi0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEta, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiEtaPrime, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiD0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDPlus_s, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiBPlus_B0_d, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiBPlus_u, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiB0_s, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF0_980, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiA0_980Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiRho770_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiRho770Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiOmega782, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiKStar892_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiPhi1020, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar2010Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDStar_s2112Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiBStar, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiJPsi1S, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiPsi2S, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiUpsilon1S, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF1_1285, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF1_1420, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiChi_c1_3510, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF2_1270, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiF2Prime1525, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK2Star1430_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiBStarStar, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDs1Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDs2Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiP, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigmaMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigmaPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigmaPlusMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiXiMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiDelta1232PlusPlus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Minus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiXi1530_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiOmegaMinus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda_c_Plus, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda_b_0, 1.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda1520, 1.0/_weightedTotalNumPiPlus);
  }
  // 130-200 GeV datasets (scaled by 70.0 — presumably a units factor, see above)
  if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
    scale(_histMeanMultiKPlus, 70.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiK0, 70.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiP, 70.0/_weightedTotalNumPiPlus);
    scale(_histMeanMultiLambda, 70.0/_weightedTotalNumPiPlus);
  }
}
//@}
private:
- double _weightedTotalNumPiPlus;
+ CounterPtr _weightedTotalNumPiPlus;
Histo1DPtr _histMeanMultiPi0;
Histo1DPtr _histMeanMultiKPlus;
Histo1DPtr _histMeanMultiK0;
Histo1DPtr _histMeanMultiEta;
Histo1DPtr _histMeanMultiEtaPrime;
Histo1DPtr _histMeanMultiDPlus;
Histo1DPtr _histMeanMultiD0;
Histo1DPtr _histMeanMultiDPlus_s;
Histo1DPtr _histMeanMultiBPlus_B0_d;
Histo1DPtr _histMeanMultiBPlus_u;
Histo1DPtr _histMeanMultiB0_s;
Histo1DPtr _histMeanMultiF0_980;
Histo1DPtr _histMeanMultiA0_980Plus;
Histo1DPtr _histMeanMultiRho770_0;
Histo1DPtr _histMeanMultiRho770Plus;
Histo1DPtr _histMeanMultiOmega782;
Histo1DPtr _histMeanMultiKStar892Plus;
Histo1DPtr _histMeanMultiKStar892_0;
Histo1DPtr _histMeanMultiPhi1020;
Histo1DPtr _histMeanMultiDStar2010Plus;
Histo1DPtr _histMeanMultiDStar2007_0;
Histo1DPtr _histMeanMultiDStar_s2112Plus;
Histo1DPtr _histMeanMultiBStar;
Histo1DPtr _histMeanMultiJPsi1S;
Histo1DPtr _histMeanMultiPsi2S;
Histo1DPtr _histMeanMultiUpsilon1S;
Histo1DPtr _histMeanMultiF1_1285;
Histo1DPtr _histMeanMultiF1_1420;
Histo1DPtr _histMeanMultiChi_c1_3510;
Histo1DPtr _histMeanMultiF2_1270;
Histo1DPtr _histMeanMultiF2Prime1525;
Histo1DPtr _histMeanMultiK2Star1430Plus;
Histo1DPtr _histMeanMultiK2Star1430_0;
Histo1DPtr _histMeanMultiBStarStar;
Histo1DPtr _histMeanMultiDs1Plus;
Histo1DPtr _histMeanMultiDs2Plus;
Histo1DPtr _histMeanMultiP;
Histo1DPtr _histMeanMultiLambda;
Histo1DPtr _histMeanMultiSigma0;
Histo1DPtr _histMeanMultiSigmaMinus;
Histo1DPtr _histMeanMultiSigmaPlus;
Histo1DPtr _histMeanMultiSigmaPlusMinus;
Histo1DPtr _histMeanMultiXiMinus;
Histo1DPtr _histMeanMultiDelta1232PlusPlus;
Histo1DPtr _histMeanMultiSigma1385Minus;
Histo1DPtr _histMeanMultiSigma1385Plus;
Histo1DPtr _histMeanMultiSigma1385PlusMinus;
Histo1DPtr _histMeanMultiXi1530_0;
Histo1DPtr _histMeanMultiOmegaMinus;
Histo1DPtr _histMeanMultiLambda_c_Plus;
Histo1DPtr _histMeanMultiLambda_b_0;
Histo1DPtr _histMeanMultiSigma_c_PlusPlus_0;
Histo1DPtr _histMeanMultiLambda1520;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(PDG_HADRON_MULTIPLICITIES_RATIOS);
}
diff --git a/analyses/pluginMisc/PDG_TAUS.cc b/analyses/pluginMisc/PDG_TAUS.cc
--- a/analyses/pluginMisc/PDG_TAUS.cc
+++ b/analyses/pluginMisc/PDG_TAUS.cc
@@ -1,214 +1,212 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/TauFinder.hh"
namespace Rivet {
class PDG_TAUS : public Analysis {
public:
/// Constructor
PDG_TAUS()
- : Analysis("PDG_TAUS"),
- _weights_had(0),
- _weights_mu(0),
- _weights_el(0)
+ : Analysis("PDG_TAUS")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
TauFinder tauleptonic(TauFinder::LEPTONIC); // open cuts, leptonic decays
declare(tauleptonic, "TauLeptonic");
TauFinder tauhadronic(TauFinder::HADRONIC); // open cuts, hadronic decays
declare(tauhadronic, "TauHadronic");
populateDecayMap();
book(_h_ratio_mu ,1, 1, 1);
book(_h_ratio_el ,1, 1, 2);
book(_h_1prong_pinu ,2, 1, 1);
book(_h_1prong_Kpnu ,2, 1, 2);
book(_h_1prong_pipinu ,2, 1, 3);
book(_h_1prong_Kppinu ,2, 1, 4);
book(_h_1prong_pipipinu ,2, 1, 5);
book(_h_1prong_Knpinu ,2, 1, 6);
book(_h_3prong_pipipinu ,2, 2, 1);
book(_h_5prong ,2, 3, 1);
+
+ book(_weights_had, "TMP/weights_had");
+ book(_weights_mu, "TMP/weights_mu");
+ book(_weights_el, "TMP/weights_el");
}
/// Perform the per-event analysis
void analyze(const Event& e) {
- const double weight = 1.0;
-
const TauFinder& taulep = apply<TauFinder>(e, "TauLeptonic");
const TauFinder& tauhad = apply<TauFinder>(e, "TauHadronic");
// Hadronic tau decays --- prong decays
foreach(const Particle& tau, tauhad.taus()) {
- _weights_had += weight;
+ _weights_had->fill();
int prongs = countProngs(tau); // number of charged particles among decay products
// Only do 1 prong decays here
if (prongs == 1) {
////// Exclusive decay modes "1-prong"
- if (analyzeDecay(tau, decay_pids["pinu"], true)) _h_1prong_pinu->fill(1, weight);
- if (analyzeDecay(tau, decay_pids["Kpnu"], true)) _h_1prong_Kpnu->fill(1, weight);
- if (analyzeDecay(tau, decay_pids["pipinu"], true)) _h_1prong_pipinu->fill(1, weight);
- if (analyzeDecay(tau, decay_pids["Kppinu"] , true)) _h_1prong_Kppinu->fill(1, weight);
- if (analyzeDecay(tau, decay_pids["pipipinu"], true)) _h_1prong_pipipinu->fill(1, weight);
+ if (analyzeDecay(tau, decay_pids["pinu"], true)) _h_1prong_pinu->fill(1);
+ if (analyzeDecay(tau, decay_pids["Kpnu"], true)) _h_1prong_Kpnu->fill(1);
+ if (analyzeDecay(tau, decay_pids["pipinu"], true)) _h_1prong_pipinu->fill(1);
+ if (analyzeDecay(tau, decay_pids["Kppinu"] , true)) _h_1prong_Kppinu->fill(1);
+ if (analyzeDecay(tau, decay_pids["pipipinu"], true)) _h_1prong_pipipinu->fill(1);
// Kshort, Klong --- (twice) filling the K0 labelled PDG histo
- if (analyzeDecay(tau, decay_pids["KSpinu"] , true)) _h_1prong_Knpinu->fill(1, weight);
- if (analyzeDecay(tau, decay_pids["KLpinu"] , true)) _h_1prong_Knpinu->fill(1, weight);
+ if (analyzeDecay(tau, decay_pids["KSpinu"] , true)) _h_1prong_Knpinu->fill(1);
+ if (analyzeDecay(tau, decay_pids["KLpinu"] , true)) _h_1prong_Knpinu->fill(1);
}
else if (prongs == 3) {
- if (analyzeDecay(tau, decay_pids["3pipipinu"], true)) _h_3prong_pipipinu->fill(1, weight);
+ if (analyzeDecay(tau, decay_pids["3pipipinu"], true)) _h_3prong_pipipinu->fill(1);
}
- else if (prongs == 5 && !any(tau.children(), HasAbsPID(310))) _h_5prong->fill(1, weight);
+ else if (prongs == 5 && !any(tau.children(), HasAbsPID(310))) _h_5prong->fill(1);
}
// Leptonic tau decays --- look for radiative and non-radiative 1 prong decays
foreach(const Particle& tau, taulep.taus()) {
int prongs = countProngs(tau); // number of charged particles among decay products
// Only do 1 prong decays here
if (prongs == 1) {
- analyzeRadiativeDecay(tau, decay_pids["muids"], _weights_mu, weight, true, _h_ratio_mu);
- analyzeRadiativeDecay(tau, decay_pids["elids"], _weights_el, weight, true, _h_ratio_el);
+ analyzeRadiativeDecay(tau, decay_pids["muids"], _weights_mu, true, _h_ratio_mu);
+ analyzeRadiativeDecay(tau, decay_pids["elids"], _weights_el, true, _h_ratio_el);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_ratio_mu, 1./_weights_mu);
scale(_h_ratio_el, 1./_weights_el);
- const double norm = _weights_had + _weights_mu + _weights_el;
+ const double norm = double(_weights_had) + double(_weights_mu) + double(_weights_el);
scale(_h_1prong_pinu, 1./norm);
scale(_h_1prong_Kpnu, 1./norm);
scale(_h_1prong_pipinu, 1./norm);
scale(_h_1prong_Kppinu, 1./norm);
scale(_h_1prong_pipipinu, 1./norm);
scale(_h_1prong_Knpinu, 1./norm);
scale(_h_3prong_pipipinu, 1./norm);
scale(_h_5prong, 1./norm);
}
// Short hand: does @a mother have a direct child with PDG ID @a id
// (|ID| comparison when @a abs is set)?
bool contains(Particle& mother, int id, bool abs=false) {
  return abs ? any(mother.children(), HasAbsPID(id))
             : any(mother.children(), HasPID(id));
}
// Count the electrically charged direct decay products ("prongs") of @a mother.
int countProngs(Particle mother) {
  int nCharged = 0;
  for (const Particle& child : mother.children()) {
    if (child.threeCharge() != 0) ++nCharged;
  }
  return nCharged;
}
// Set up a lookup table for decays: mode name -> the decay-product PDG IDs
// that analyzeDecay()/analyzeRadiativeDecay() will look for among the tau's
// children. The tau neutrino (16) is always part of the expected products;
// radiative photons are handled separately in analyzeRadiativeDecay().
void populateDecayMap() {
  decay_pids["muids"] = {{ 13,14,16 }};
  decay_pids["elids"] = {{ 11,12,16 }};
  decay_pids["pinu"] = {{ 211,16 }};
  decay_pids["Kpnu"] = {{ 321,16 }};
  decay_pids["pipinu"] = {{ 111,211,16 }};
  decay_pids["Kppinu"] = {{ 111,321,16 }};
  decay_pids["pipipinu"] = {{ 111,111,211,16 }};
  decay_pids["KSpinu"] = {{ 211,310,16 }};
  decay_pids["KLpinu"] = {{ 211,130,16 }};
  decay_pids["3pipipinu"] = {{ 211,211,211,16 }};
}
// Check whether @a mother decays exactly into the products listed in @a ids
// (PDG IDs; compared by |ID| when @a absolute). A match requires the number
// of children to equal ids.size() AND every listed ID to occur among them.
// NOTE(review): duplicated IDs in @a ids (e.g. the two pi0s in
// {111,111,211,16}) are only checked for *presence*, not multiplicity, so a
// decay with the right child count but a different ID multiset could match —
// TODO confirm this size+presence criterion is sufficient for the modes used.
bool analyzeDecay(Particle mother, vector<int> ids, bool absolute) {
  // There is no point in looking for decays with less particles than to be analysed
  if (mother.children().size() == ids.size()) {
    bool decayfound = true;
    foreach (int id, ids) {
      if (!contains(mother, id, absolute)) decayfound = false;
    }
    return decayfound;
  } // end of first if
  return false;
}
// Look for radiative (and non-radiative) tau decays to fill a ratio histo
- void analyzeRadiativeDecay(Particle mother, vector<int> ids, double &w_incl, double e_weight, bool absolute, Histo1DPtr h_ratio) {
+ void analyzeRadiativeDecay(Particle mother, vector<int> ids, CounterPtr &w_incl, bool absolute, Histo1DPtr h_ratio) {
// w_incl ... reference to a global weight counter for all leptonic tau decays
- // e_weight ... the current event weight
- // h_ratio ... pointer to ratio histo --- filled with e_weight in case of radiative events only
-
+ // h_ratio ... pointer to ratio histo
+
// There is no point in looking for decays with less particles than to be analysed
if (mother.children().size() >= ids.size()) {
bool decayfound = true;
foreach (int id, ids) {
if (!contains(mother, id, absolute)) decayfound = false;
}
// Do not increment counters if the specified decay products were not found
if (decayfound) {
- w_incl += e_weight; // the (global) weight counter for leptonic decays
+ w_incl->fill(); // the (global) weight counter for leptonic decays
bool radiative = any(mother.children(), HasPID(PID::PHOTON));
// Only fill the histo if there is a radiative decay
if (radiative) {
// Iterate over decay products to find photon with 5 MeV energy
foreach (const Particle& son, mother.children()) {
if (son.pid() == PID::PHOTON) {
// Require photons to have at least 5 MeV energy in the rest frame of the tau
// boosted taus
if (!mother.momentum().betaVec().isZero()) {
LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mother.momentum().betaVec());
if (cms_boost.transform(son.momentum())[0]/MeV > 5.) {
- h_ratio->fill(1, e_weight);
+ h_ratio->fill(1);
break;
}
}
// not boosted taus
else {
if (son.momentum()[0]/MeV > 5.) {
- h_ratio->fill(1, e_weight);
+ h_ratio->fill(1);
break;
}
}
}
} // end loop over decay products
} // end of radiative
} // end of decayfound
} // end of first if
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_ratio_mu, _h_ratio_el;
Histo1DPtr _h_1prong_pinu, _h_1prong_Kpnu, _h_1prong_Kppinu, _h_1prong_pipinu, _h_1prong_pipipinu, _h_1prong_Knpinu;
Histo1DPtr _h_3prong_pipipinu;
Histo1DPtr _h_5prong;
//@}
- double _weights_had, _weights_mu, _weights_el;
+ CounterPtr _weights_had, _weights_mu, _weights_el;
map<string, vector<int> > decay_pids;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(PDG_TAUS);
}
diff --git a/analyses/pluginMisc/SFM_1984_S1178091.cc b/analyses/pluginMisc/SFM_1984_S1178091.cc
--- a/analyses/pluginMisc/SFM_1984_S1178091.cc
+++ b/analyses/pluginMisc/SFM_1984_S1178091.cc
@@ -1,100 +1,99 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief SFM charged multiplicities in NSD and inelastic minbias events
class SFM_1984_S1178091 : public Analysis {
public:
/// Constructor
SFM_1984_S1178091() : Analysis("SFM_1984_S1178091") {}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run.
void init() {
  // Projections: charged tracks with |y| < 5 and 0.25 < pT < 3 GeV
  declare(ChargedFinalState(Cuts::absrap<5 && Cuts::pT>250*MeV && Cuts::pT<3*GeV), "FS");
  // Histograms: the histo ID axis encodes the ISR beam energy.
  // Consistently compare sqrtS()/GeV: the original mixed bare sqrtS() with
  // sqrtS()/GeV, which only worked because Rivet's unit system sets
  // GeV == 1; the explicit form is used in all four branches here.
  if (fuzzyEquals(sqrtS()/GeV, 30.4, 1E-1)) {
    book(_hist_multiplicity_inel ,1, 1, 1);
    book(_hist_multiplicity_nsd ,2, 1, 1);
  } else if (fuzzyEquals(sqrtS()/GeV, 44.5, 1E-1)) {
    book(_hist_multiplicity_inel ,1, 1, 2);
    book(_hist_multiplicity_nsd ,2, 1, 2);
  } else if (fuzzyEquals(sqrtS()/GeV, 52.2, 1E-1)) {
    book(_hist_multiplicity_inel ,1, 1, 3);
    book(_hist_multiplicity_nsd ,2, 1, 3);
  } else if (fuzzyEquals(sqrtS()/GeV, 62.2, 1E-1)) {
    book(_hist_multiplicity_inel ,1, 1, 4);
    book(_hist_multiplicity_nsd ,2, 1, 4);
  }
}
// Analyse each event
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& fs = apply<ChargedFinalState>(event, "FS");
// Trigger
if (fs.particles().size() <1 ) vetoEvent;
// Event classification:
int n_left(0), n_right(0), n_large_x(0);
foreach (const Particle& p, fs.particles()) {
// Calculate the particles' Feynman x
const double x_feyn = 2.0 * fabs(p.pz())/sqrtS();
if (x_feyn > 0.8 ) n_large_x += 1;
// Pseudorapidity
const double eta = p.eta();
if (eta > 0.0) n_right += 1;
else if (eta < 0.0) n_left += 1;
}
MSG_DEBUG("N_left: " << n_left << ", "
<< "N_right: " << n_right << ", "
<< "N_large_x: " << n_large_x);
// Single diffractive: either one large x particle or 0 particles in the one hemisphere but more than 7 in the other hemisphere
bool isDiffractive = (n_large_x == 1) || ( ((n_left==0) && (fs.particles().size() < 7)) || ((n_right==0) && (fs.particles().size() < 7)) );
- _hist_multiplicity_inel->fill(fs.particles().size(), weight);
- if (!isDiffractive) _hist_multiplicity_nsd->fill(fs.particles().size(), weight);
+ _hist_multiplicity_inel->fill(fs.particles().size());
+ if (!isDiffractive) _hist_multiplicity_nsd->fill(fs.particles().size());
}
/// Normalise both multiplicity distributions to unit area after the run.
void finalize() {
  normalize(_hist_multiplicity_nsd);
  normalize(_hist_multiplicity_inel);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _hist_multiplicity_inel;
Histo1DPtr _hist_multiplicity_nsd;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(SFM_1984_S1178091);
}
diff --git a/analyses/pluginPetra/JADE_1998_S3612880.cc b/analyses/pluginPetra/JADE_1998_S3612880.cc
--- a/analyses/pluginPetra/JADE_1998_S3612880.cc
+++ b/analyses/pluginPetra/JADE_1998_S3612880.cc
@@ -1,133 +1,132 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
/// @todo Include more projections as required, e.g. ChargedFinalState, FastJets, ZFinder...
namespace Rivet {
class JADE_1998_S3612880 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
JADE_1998_S3612880()
: Analysis("JADE_1998_S3612880")
{
/// @todo Set whether your finalize method needs the generator cross section
}
/// Book histograms and initialise projections before the run
void init() {
  // Charged tracks with p > 0.1 GeV over the full rapidity range
  const ChargedFinalState cfs(-MAXDOUBLE, MAXDOUBLE, 0.1/GeV);
  declare(cfs, "CFS");
  // Durham jet clustering of the charged tracks (for the y23 observable)
  declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets");
  // Thrust, plus hemisphere masses/broadenings defined by the thrust axis
  const Thrust thrust(cfs);
  declare(thrust, "Thrust");
  declare(Hemispheres(thrust), "Hemispheres");
  // Histos: the histo IDs depend on sqrt(s). At 22 GeV only the y23
  // distribution is measured, so the event-shape histos stay unbooked
  // there — analyze() and finalize() guard on sqrt(s) before using them.
  int offset = 0;
  switch (int(sqrtS()/GeV)) {
  case 44:
    offset = 0;
    book(_h_thrust , 2+offset, 1, 1);
    book(_h_MH , 3 + offset, 1, 1);
    book(_h_BT , 4 + offset, 1, 1);
    book(_h_BW , 5 + offset, 1, 1);
    book(_h_y23 ,10, 1, 1);
    break;
  case 35:
    offset = 4;
    book(_h_thrust , 2+offset, 1, 1);
    book(_h_MH , 3 + offset, 1, 1);
    book(_h_BT , 4 + offset, 1, 1);
    book(_h_BW , 5 + offset, 1, 1);
    book(_h_y23 ,11, 1, 1);
    break;
  case 22:
    book(_h_y23 ,12, 1, 1);
    break;
  }
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
// JADE hadronic event selection TODO: move this into a trigger definition
if (cfs.particles().size() < 3 ) {
vetoEvent;
}
const Thrust& thrust = apply<Thrust>(event, "Thrust");
const Vector3 & thrustAxis = thrust.thrustAxis ();
double theta = thrustAxis.theta();
if ( fabs(cos(theta)) >= 0.8 ) {
MSG_DEBUG("Failed thrust angle cut: " << fabs(cos(theta)));
vetoEvent;
}
// TODO Evis, pmiss, pbal
const Hemispheres& hemi = apply<Hemispheres>(event, "Hemispheres");
const FastJets& durjet = apply<FastJets>(event, "DurhamJets");
double y23 = durjet.clusterSeq()->exclusive_ymerge_max(2);
// Make sure we don't run into a segfault by trying to fill non-existing histos
int s = int(sqrtS()/GeV);
if (s == 44 || s == 35) {
- _h_thrust->fill(1. - thrust.thrust(), weight);
- _h_MH->fill(sqrt(hemi.scaledM2high()), weight);
- _h_BT->fill(hemi.Bsum(), weight);
- _h_BW->fill(hemi.Bmax(), weight);
+ _h_thrust->fill(1. - thrust.thrust());
+ _h_MH->fill(sqrt(hemi.scaledM2high()));
+ _h_BT->fill(hemi.Bsum());
+ _h_BW->fill(hemi.Bmax());
}
- _h_y23->fill(y23, weight);
+ _h_y23->fill(y23);
}
/// Normalise histograms after the run.
void finalize() {
  // The event-shape histos exist only for the 35 and 44 GeV runs; guard
  // here so we never normalise unbooked (null) histo pointers.
  const int energy = int(sqrtS()/GeV);
  if (energy == 35 || energy == 44) {
    normalize(_h_BW);
    normalize(_h_BT);
    normalize(_h_MH);
    normalize(_h_thrust);
  }
  normalize(_h_y23);
}
//@}
private:
Histo1DPtr _h_thrust;
Histo1DPtr _h_MH;
Histo1DPtr _h_BT;
Histo1DPtr _h_BW;
Histo1DPtr _h_y23;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(JADE_1998_S3612880);
}
diff --git a/analyses/pluginPetra/JADE_OPAL_2000_S4300807.cc b/analyses/pluginPetra/JADE_OPAL_2000_S4300807.cc
--- a/analyses/pluginPetra/JADE_OPAL_2000_S4300807.cc
+++ b/analyses/pluginPetra/JADE_OPAL_2000_S4300807.cc
@@ -1,187 +1,186 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
namespace Rivet {
/// @brief Jet rates in \f$ e^+ e^- \f$ at OPAL and JADE
/// @author Frank Siegert
class JADE_OPAL_2000_S4300807 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
JADE_OPAL_2000_S4300807()
: Analysis("JADE_OPAL_2000_S4300807")
{ }
//@}
/// @name Analysis methods
//@{
void init() {
  // Projections: cluster ALL final-state particles — invisibles included,
  // per useInvisibles(true) — with both the JADE and Durham algorithms.
  const FinalState fs;
  declare(fs, "FS");
  FastJets jadeJets = FastJets(fs, FastJets::JADE, 0.7);
  FastJets durhamJets = FastJets(fs, FastJets::DURHAM, 0.7);
  jadeJets.useInvisibles(true);
  durhamJets.useInvisibles(true);
  declare(jadeJets, "JadeJets");
  declare(durhamJets, "DurhamJets");
  // Histos: the +0.5 rounds sqrt(s) to the nearest integer GeV before
  // matching it to a table offset.
  // NOTE(review): an unlisted energy falls through with offset = 0, so the
  // book() calls below would request histo IDs 0/9/17 — presumably this
  // analysis is only ever run at the listed energies; confirm.
  int offset = 0;
  switch (int(sqrtS()/GeV + 0.5)) {
  case 35: offset = 7; break;
  case 44: offset = 8; break;
  case 91: offset = 9; break;
  case 133: offset = 10; break;
  case 161: offset = 11; break;
  case 172: offset = 12; break;
  case 183: offset = 13; break;
  case 189: offset = 14; break;
  default: break;
  }
  // Five R_n jet-rate histos each for JADE and Durham, but only four
  // differential y_{n,n+1} distributions (Durham only).
  for (size_t i = 0; i < 5; ++i) {
    book(_h_R_Jade[i] ,offset, 1, i+1);
    book(_h_R_Durham[i] ,offset+9, 1, i+1);
    if (i < 4) book(_h_y_Durham[i] ,offset+17, 1, i+1);
  }
}
void analyze(const Event& e) {
- const double weight = 1.0;
MSG_DEBUG("Num particles = " << apply<FinalState>(e, "FS").particles().size());
const FastJets& jadejet = apply<FastJets>(e, "JadeJets");
if (jadejet.clusterSeq()) {
const double y_23 = jadejet.clusterSeq()->exclusive_ymerge_max(2);
const double y_34 = jadejet.clusterSeq()->exclusive_ymerge_max(3);
const double y_45 = jadejet.clusterSeq()->exclusive_ymerge_max(4);
const double y_56 = jadejet.clusterSeq()->exclusive_ymerge_max(5);
for (size_t i = 0; i < _h_R_Jade[0]->numBins(); ++i) {
double ycut = _h_R_Jade[0]->bin(i).xMid();
double width = _h_R_Jade[0]->bin(i).xWidth();
if (y_23 < ycut) {
- _h_R_Jade[0]->fillBin(i, weight*width);
+ _h_R_Jade[0]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Jade[1]->numBins(); ++i) {
double ycut = _h_R_Jade[1]->bin(i).xMid();
double width = _h_R_Jade[1]->bin(i).xWidth();
if (y_34 < ycut && y_23 > ycut) {
- _h_R_Jade[1]->fillBin(i, weight*width);
+ _h_R_Jade[1]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Jade[2]->numBins(); ++i) {
double ycut = _h_R_Jade[2]->bin(i).xMid();
double width = _h_R_Jade[2]->bin(i).xWidth();
if (y_45 < ycut && y_34 > ycut) {
- _h_R_Jade[2]->fillBin(i, weight*width);
+ _h_R_Jade[2]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Jade[3]->numBins(); ++i) {
double ycut = _h_R_Jade[3]->bin(i).xMid();
double width = _h_R_Jade[3]->bin(i).xWidth();
if (y_56 < ycut && y_45 > ycut) {
- _h_R_Jade[3]->fillBin(i, weight*width);
+ _h_R_Jade[3]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Jade[4]->numBins(); ++i) {
double ycut = _h_R_Jade[4]->bin(i).xMid();
double width = _h_R_Jade[4]->bin(i).xWidth();
if (y_56 > ycut) {
- _h_R_Jade[4]->fillBin(i, weight*width);
+ _h_R_Jade[4]->fillBin(i, width);
}
}
}
const FastJets& durjet = apply<FastJets>(e, "DurhamJets");
if (durjet.clusterSeq()) {
const double y_23 = durjet.clusterSeq()->exclusive_ymerge_max(2);
const double y_34 = durjet.clusterSeq()->exclusive_ymerge_max(3);
const double y_45 = durjet.clusterSeq()->exclusive_ymerge_max(4);
const double y_56 = durjet.clusterSeq()->exclusive_ymerge_max(5);
- _h_y_Durham[0]->fill(y_23, weight);
- _h_y_Durham[1]->fill(y_34, weight);
- _h_y_Durham[2]->fill(y_45, weight);
- _h_y_Durham[3]->fill(y_56, weight);
+ _h_y_Durham[0]->fill(y_23);
+ _h_y_Durham[1]->fill(y_34);
+ _h_y_Durham[2]->fill(y_45);
+ _h_y_Durham[3]->fill(y_56);
for (size_t i = 0; i < _h_R_Durham[0]->numBins(); ++i) {
double ycut = _h_R_Durham[0]->bin(i).xMid();
double width = _h_R_Durham[0]->bin(i).xWidth();
if (y_23 < ycut) {
- _h_R_Durham[0]->fillBin(i, weight*width);
+ _h_R_Durham[0]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Durham[1]->numBins(); ++i) {
double ycut = _h_R_Durham[1]->bin(i).xMid();
double width = _h_R_Durham[1]->bin(i).xWidth();
if (y_34 < ycut && y_23 > ycut) {
- _h_R_Durham[1]->fillBin(i, weight*width);
+ _h_R_Durham[1]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Durham[2]->numBins(); ++i) {
double ycut = _h_R_Durham[2]->bin(i).xMid();
double width = _h_R_Durham[2]->bin(i).xWidth();
if (y_45 < ycut && y_34 > ycut) {
- _h_R_Durham[2]->fillBin(i, weight*width);
+ _h_R_Durham[2]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Durham[3]->numBins(); ++i) {
double ycut = _h_R_Durham[3]->bin(i).xMid();
double width = _h_R_Durham[3]->bin(i).xWidth();
if (y_56 < ycut && y_45 > ycut) {
- _h_R_Durham[3]->fillBin(i, weight*width);
+ _h_R_Durham[3]->fillBin(i, width);
}
}
for (size_t i = 0; i < _h_R_Durham[4]->numBins(); ++i) {
double ycut = _h_R_Durham[4]->bin(i).xMid();
double width = _h_R_Durham[4]->bin(i).xWidth();
if (y_56 > ycut) {
- _h_R_Durham[4]->fillBin(i, weight*width);
+ _h_R_Durham[4]->fillBin(i, width);
}
}
}
}
/// Finalize
void finalize() {
for (size_t n = 0; n < 4; ++n) normalize(_h_y_Durham[n]);
for (size_t n = 0; n < 5; ++n) scale(_h_R_Jade[n], 100/sumOfWeights());
for (size_t n = 0; n < 5; ++n) scale(_h_R_Durham[n], 100/sumOfWeights());
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_R_Jade[5];
Histo1DPtr _h_R_Durham[5];
Histo1DPtr _h_y_Durham[4];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(JADE_OPAL_2000_S4300807);
}
diff --git a/analyses/pluginPetra/TASSO_1990_S2148048.cc b/analyses/pluginPetra/TASSO_1990_S2148048.cc
--- a/analyses/pluginPetra/TASSO_1990_S2148048.cc
+++ b/analyses/pluginPetra/TASSO_1990_S2148048.cc
@@ -1,166 +1,165 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
//#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
/// @todo Include more projections as required, e.g. ChargedFinalState, FastJets, ZFinder...
namespace Rivet {
class TASSO_1990_S2148048 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
TASSO_1990_S2148048()
: Analysis("TASSO_1990_S2148048")
{
/// @todo Set whether your finalize method needs the generator cross section
//_sumWPassed = 0;
}
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
  // Charged tracks with p > 0.1 GeV over the full rapidity range
  const ChargedFinalState cfs(-MAXDOUBLE, MAXDOUBLE, 0.1/GeV);
  declare(cfs, "CFS");
  //// Beams -- needed for x_p calculation
  //declare(Beam(), "Beams");
  // Thrust
  declare(Thrust(cfs), "Thrust");
  // For Sphericity and the like
  declare(Sphericity(cfs), "Sphericity");
  // Histos: the last histo index selects the beam-energy column
  // (14, 22, 35 or 44 GeV) of the paper's tables.
  int offset = 0;
  switch (int(sqrtS()/GeV)) {
  case 14:
    offset = 0;
    break;
  case 22:
    offset = 1;
    break;
  case 35:
    offset = 2;
    break;
  case 44:
    offset = 3;
    break;
  }
  // x_p spectrum booking disabled (its filling/scaling are commented out too)
  //book(_h_xp , 2, 1, 1+offset);
  book(_h_sphericity , 6, 1, 1+offset);
  book(_h_aplanarity , 7, 1, 1+offset);
  book(_h_thrust , 8, 1, 1+offset);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = 1.0;
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
//// Get beams and average beam momentum
//const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
//const double meanBeamMom = ( beams.first.p3().mod() +
//beams.second.p3().mod() ) / 2.0;
// TASSO hadronic event selection TODO: move this into a trigger definition
// See page 2 in publication
// Condition 1) --- require at least 5 (4) 'good' tracks
int nch = cfs.particles().size();
if ( (int(sqrtS()/GeV) > 27 && nch < 5) || (int(sqrtS()/GeV) <= 27 && nch < 4 ) ) {
MSG_DEBUG("Failed # good tracks cut: " << nch);
vetoEvent;
}
// Condition 2) ---
// Condition 5) --- scalar momentum (not pT!!!) sum >= 0.265*s
double momsum = 0.0;
foreach (const Particle& p, cfs.particles()) {
const double mom = p.p3().mod();
momsum += mom;
}
if (momsum <=0.265 * sqrtS()/GeV) {
MSG_DEBUG("Failed pTsum cut: " << momsum << " < " << 0.265 * sqrtS()/GeV);
vetoEvent;
}
// Raise counter for events that pass trigger conditions
//_sumWPassed += 1.0;
const Thrust& thrust = apply<Thrust>(event, "Thrust");
//const Vector3 & thrustAxis = thrust.thrustAxis ();
//double theta = thrustAxis.theta();
//if ( fabs(cos(theta)) >= 0.8 ) {
//MSG_DEBUG("Failed thrust angle cut: " << fabs(cos(theta)));
//vetoEvent;
//}
const Sphericity& sphericity = apply<Sphericity>(event, "Sphericity");
//// Fill histograms in order of appearance in paper
//foreach (const Particle& p, cfs.particles()) {
//// Get momentum and energy of each particle.
//const Vector3 mom3 = p.p3();
//// Scaled momenta.
//const double mom = mom3.mod();
//const double scaledMom = mom/meanBeamMom;
- //_h_xp->fill(scaledMom, weight);
+ //_h_xp->fill(scaledMom);
//}
//
- _h_sphericity->fill(sphericity.sphericity(), weight);
- _h_aplanarity->fill(sphericity.aplanarity(), weight);
- _h_thrust->fill(thrust.thrust(), weight);
+ _h_sphericity->fill(sphericity.sphericity());
+ _h_aplanarity->fill(sphericity.aplanarity());
+ _h_thrust->fill(thrust.thrust());
}
/// Normalise histograms etc., after the run
void finalize() {
//scale(_h_xp, _sumWPassed/(crossSection()*sumOfWeights()));
normalize(_h_sphericity);
normalize(_h_aplanarity);
normalize(_h_thrust );
}
//@}
private:
// Data members like post-cuts event weight counters go here
- //double _sumWPassed;
+ //CounterPtr _sumWPassed;
private:
/// @name Histograms
//@{
Histo1DPtr _h_xp ;
Histo1DPtr _h_sphericity;
Histo1DPtr _h_aplanarity;
Histo1DPtr _h_thrust ;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TASSO_1990_S2148048);
}
diff --git a/analyses/pluginRHIC/STAR_2006_S6500200.cc b/analyses/pluginRHIC/STAR_2006_S6500200.cc
--- a/analyses/pluginRHIC/STAR_2006_S6500200.cc
+++ b/analyses/pluginRHIC/STAR_2006_S6500200.cc
@@ -1,108 +1,108 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
namespace Rivet {
/// @brief STAR identified hadron spectra in pp at 200 GeV
class STAR_2006_S6500200 : public Analysis {
public:
/// Constructor
STAR_2006_S6500200()
- : Analysis("STAR_2006_S6500200"),
- _sumWeightSelected(0.0)
+ : Analysis("STAR_2006_S6500200")
{ }
/// Book projections and histograms
void init() {
ChargedFinalState bbc1(-5.0,-3.3, 0.0*GeV); // beam-beam-counter trigger
ChargedFinalState bbc2( 3.3, 5.0, 0.0*GeV); // beam-beam-counter trigger
declare(bbc1, "BBC1");
declare(bbc2, "BBC2");
IdentifiedFinalState pionfs(Cuts::abseta < 2.5 && Cuts::pT > 0.3*GeV);
IdentifiedFinalState protonfs(Cuts::abseta < 2.5 && Cuts::pT > 0.4*GeV);
pionfs.acceptIdPair(PID::PIPLUS);
protonfs.acceptIdPair(PID::PROTON);
declare(pionfs, "PionFS");
declare(protonfs, "ProtonFS");
book(_h_pT_piplus ,1, 1, 1); // full range pion binning
book(_h_pT_piminus ,1, 2, 1); // full range pion binning
book(_tmp_pT_piplus ,"TMP/pT_piplus", refData(2, 3, 1)); // pi histo compatible with more restricted proton binning
book(_tmp_pT_piminus ,"TMP/pT_piminus", refData(2, 4, 1)); // pi histo compatible with more restricted proton binning
book(_h_pT_proton ,1, 3, 1);
book(_h_pT_antiproton ,1, 4, 1);
book(_s_piminus_piplus, 2, 1, 1);
book(_s_antipr_pr , 2, 2, 1);
book(_s_pr_piplus , 2, 3, 1);
book(_s_antipr_piminus, 2, 4, 1);
+
+ book(_sumWeightSelected, "sumWeightSelected");
}
/// Do the analysis
void analyze(const Event& event) {
const ChargedFinalState& bbc1 = apply<ChargedFinalState>(event, "BBC1");
const ChargedFinalState& bbc2 = apply<ChargedFinalState>(event, "BBC2");
if (bbc1.size() < 1 || bbc2.size() < 1) {
MSG_DEBUG("Failed beam-beam-counter trigger");
vetoEvent;
}
- const double weight = 1.0;
-
const IdentifiedFinalState& pionfs = apply<IdentifiedFinalState>(event, "PionFS");
foreach (const Particle& p, pionfs.particles()) {
if (p.absrap() < 0.5) {
/// @todo Use a binned counter to avoid this bin width cancellation hack
const double pT = p.pT() / GeV;
- ((p.pid() > 0) ? _h_pT_piplus : _h_pT_piminus)->fill(pT, weight/pT);
- ((p.pid() > 0) ? _tmp_pT_piplus : _tmp_pT_piminus)->fill(pT, weight/pT);
+ ((p.pid() > 0) ? _h_pT_piplus : _h_pT_piminus)->fill(pT, 1.0/pT);
+ ((p.pid() > 0) ? _tmp_pT_piplus : _tmp_pT_piminus)->fill(pT, 1.0/pT);
}
}
const IdentifiedFinalState& protonfs = apply<IdentifiedFinalState>(event, "ProtonFS");
foreach (const Particle& p, protonfs.particles()) {
if (p.absrap() < 0.5) {
/// @todo Use a binned counter to avoid this bin width cancellation hack
const double pT = p.pT() / GeV;
- ((p.pid() > 0) ? _h_pT_proton : _h_pT_antiproton)->fill(pT, weight/pT);
+ ((p.pid() > 0) ? _h_pT_proton : _h_pT_antiproton)->fill(pT, 1.0/pT);
}
}
- _sumWeightSelected += 1.0;
+ _sumWeightSelected->fill();
}
/// Finalize
void finalize() {
divide(_h_pT_piminus, _h_pT_piplus, _s_piminus_piplus);
divide(_h_pT_antiproton, _h_pT_proton, _s_antipr_pr);
divide(_h_pT_proton, _tmp_pT_piplus, _s_pr_piplus);
divide(_h_pT_antiproton, _tmp_pT_piminus, _s_antipr_piminus);
- scale(_h_pT_piplus, 1/(2*M_PI*_sumWeightSelected));
- scale(_h_pT_piminus, 1/(2*M_PI*_sumWeightSelected));
- scale(_h_pT_proton, 1/(2*M_PI*_sumWeightSelected));
- scale(_h_pT_antiproton, 1/(2*M_PI*_sumWeightSelected));
+ const double factor = 1/(2*M_PI*double(_sumWeightSelected));
+ scale(_h_pT_piplus, factor);
+ scale(_h_pT_piminus, factor);
+ scale(_h_pT_proton, factor);
+ scale(_h_pT_antiproton, factor);
}
private:
- double _sumWeightSelected;
+ CounterPtr _sumWeightSelected;
Histo1DPtr _h_pT_piplus, _h_pT_piminus, _h_pT_proton, _h_pT_antiproton;
Histo1DPtr _tmp_pT_piplus, _tmp_pT_piminus;
Scatter2DPtr _s_piminus_piplus, _s_antipr_pr, _s_pr_piplus, _s_antipr_piminus;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2006_S6500200);
}
diff --git a/analyses/pluginRHIC/STAR_2006_S6860818.cc b/analyses/pluginRHIC/STAR_2006_S6860818.cc
--- a/analyses/pluginRHIC/STAR_2006_S6860818.cc
+++ b/analyses/pluginRHIC/STAR_2006_S6860818.cc
@@ -1,193 +1,195 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief STAR strange particle spectra in pp at 200 GeV
class STAR_2006_S6860818 : public Analysis {
public:
/// Constructor
STAR_2006_S6860818()
- : Analysis("STAR_2006_S6860818"),
- _sumWeightSelected(0.0)
+ : Analysis("STAR_2006_S6860818")
{
for (size_t i = 0; i < 4; i++) {
_nBaryon[i] = 0;
_nAntiBaryon[i] = 0;
- _nWeightedBaryon[i] = 0.;
- _nWeightedAntiBaryon[i] = 0.;
}
}
/// Book projections and histograms
void init() {
ChargedFinalState bbc1(Cuts::etaIn(-5.0, -3.5)); // beam-beam-counter trigger
ChargedFinalState bbc2(Cuts::etaIn( 3.5, 5.0)); // beam-beam-counter trigger
declare(bbc1, "BBC1");
declare(bbc2, "BBC2");
UnstableFinalState ufs(Cuts::abseta < 2.5);
declare(ufs, "UFS");
book(_h_pT_k0s ,1, 1, 1);
book(_h_pT_kminus ,1, 2, 1);
book(_h_pT_kplus ,1, 3, 1);
book(_h_pT_lambda ,1, 4, 1);
book(_h_pT_lambdabar ,1, 5, 1);
book(_h_pT_ximinus ,1, 6, 1);
book(_h_pT_xiplus ,1, 7, 1);
//book(_h_pT_omega ,1, 8, 1);
book(_h_antibaryon_baryon_ratio, 2, 1, 1);
book(_h_lambar_lam, 2, 2, 1);
book(_h_xiplus_ximinus, 2, 3, 1);
book(_h_pT_vs_mass ,3, 1, 1);
+
+ for (size_t i = 0; i < 4; i++) {
+ book(_nWeightedBaryon[i], "TMP/nWeightedBaryon"+to_str(i));
+ book(_nWeightedAntiBaryon[i], "TMP/nWeightedAntiBaryon"+to_str(i));
+ }
+ book(_sumWeightSelected, "sumWselected");
}
/// Do the analysis
void analyze(const Event& event) {
const ChargedFinalState& bbc1 = apply<ChargedFinalState>(event, "BBC1");
const ChargedFinalState& bbc2 = apply<ChargedFinalState>(event, "BBC2");
if (bbc1.size()<1 || bbc2.size()<1) {
MSG_DEBUG("Failed beam-beam-counter trigger");
vetoEvent;
}
- const double weight = 1.0;
-
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.absrap() < 0.5) {
const PdgId pid = p.pid();
const double pT = p.pT() / GeV;
switch (abs(pid)) {
case PID::PIPLUS:
- if (pid < 0) _h_pT_vs_mass->fill(0.1396, pT, weight);
+ if (pid < 0) _h_pT_vs_mass->fill(0.1396, pT);
break;
case PID::PROTON:
- if (pid < 0) _h_pT_vs_mass->fill(0.9383, pT, weight);
+ if (pid < 0) _h_pT_vs_mass->fill(0.9383, pT);
if (pT > 0.4) {
pid > 0 ? _nBaryon[0]++ : _nAntiBaryon[0]++;
- pid > 0 ? _nWeightedBaryon[0]+=weight : _nWeightedAntiBaryon[0]+=weight;
+ pid > 0 ? _nWeightedBaryon[0]->fill() : _nWeightedAntiBaryon[0]->fill();
}
break;
case PID::K0S:
if (pT > 0.2) {
- _h_pT_k0s->fill(pT, weight/pT);
+ _h_pT_k0s->fill(pT, 1.0/pT);
}
- _h_pT_vs_mass->fill(0.5056, pT, weight);
+ _h_pT_vs_mass->fill(0.5056, pT);
break;
case PID::K0L:
- _h_pT_vs_mass->fill(0.5056, pT, weight);
+ _h_pT_vs_mass->fill(0.5056, pT);
break;
case 113: // rho0(770)
- _h_pT_vs_mass->fill(0.7755, pT, weight);
+ _h_pT_vs_mass->fill(0.7755, pT);
break;
case 313: // K0*(892)
- _h_pT_vs_mass->fill(0.8960, pT, weight);
+ _h_pT_vs_mass->fill(0.8960, pT);
break;
case 333: // phi(1020)
- _h_pT_vs_mass->fill(1.0190, pT, weight);
+ _h_pT_vs_mass->fill(1.0190, pT);
break;
case 3214: // Sigma(1385)
- _h_pT_vs_mass->fill(1.3840, pT, weight);
+ _h_pT_vs_mass->fill(1.3840, pT);
break;
case 3124: // Lambda(1520)
- _h_pT_vs_mass->fill(1.5200, pT, weight);
+ _h_pT_vs_mass->fill(1.5200, pT);
break;
case PID::KPLUS:
- if (pid < 0) _h_pT_vs_mass->fill(0.4856, pT, weight);
+ if (pid < 0) _h_pT_vs_mass->fill(0.4856, pT);
if (pT > 0.2) {
- pid > 0 ? _h_pT_kplus->fill(pT, weight/pT) : _h_pT_kminus->fill(pT, weight/pT);
+ pid > 0 ? _h_pT_kplus->fill(pT, 1.0/pT) : _h_pT_kminus->fill(pT, 1.0/pT);
}
break;
case PID::LAMBDA:
- pid > 0 ? _h_pT_vs_mass->fill(1.1050, pT, weight) : _h_pT_vs_mass->fill(1.1250, pT, weight);
+ pid > 0 ? _h_pT_vs_mass->fill(1.1050, pT) : _h_pT_vs_mass->fill(1.1250, pT);
if (pT > 0.3) {
- pid > 0 ? _h_pT_lambda->fill(pT, weight/pT) : _h_pT_lambdabar->fill(pT, weight/pT);
+ pid > 0 ? _h_pT_lambda->fill(pT, 1.0/pT) : _h_pT_lambdabar->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[1]++ : _nAntiBaryon[1]++;
- pid > 0 ? _nWeightedBaryon[1]+=weight : _nWeightedAntiBaryon[1]+=weight;
+ pid > 0 ? _nWeightedBaryon[1]->fill() : _nWeightedAntiBaryon[1]->fill();
}
break;
case PID::XIMINUS:
- pid > 0 ? _h_pT_vs_mass->fill(1.3120, pT, weight) : _h_pT_vs_mass->fill(1.3320, pT, weight);
+ pid > 0 ? _h_pT_vs_mass->fill(1.3120, pT) : _h_pT_vs_mass->fill(1.3320, pT);
if (pT > 0.5) {
- pid > 0 ? _h_pT_ximinus->fill(pT, weight/pT) : _h_pT_xiplus->fill(pT, weight/pT);
+ pid > 0 ? _h_pT_ximinus->fill(pT, 1.0/pT) : _h_pT_xiplus->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[2]++ : _nAntiBaryon[2]++;
- pid > 0 ? _nWeightedBaryon[2]+=weight : _nWeightedAntiBaryon[2]+=weight;
+ pid > 0 ? _nWeightedBaryon[2]->fill() : _nWeightedAntiBaryon[2]->fill();
}
break;
case PID::OMEGAMINUS:
- _h_pT_vs_mass->fill(1.6720, pT, weight);
+ _h_pT_vs_mass->fill(1.6720, pT);
if (pT > 0.5) {
- //_h_pT_omega->fill(pT, weight/pT);
+ //_h_pT_omega->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[3]++ : _nAntiBaryon[3]++;
- pid > 0 ? _nWeightedBaryon[3]+=weight : _nWeightedAntiBaryon[3]+=weight;
+ pid > 0 ? _nWeightedBaryon[3]->fill() : _nWeightedAntiBaryon[3]->fill();
}
break;
}
}
}
- _sumWeightSelected += 1.0;
+ _sumWeightSelected->fill();
}
/// Finalize
void finalize() {
std::vector<Point2D> points;
for (size_t i=0 ; i<4 ; i++) {
if (_nWeightedBaryon[i]==0 || _nWeightedAntiBaryon[i]==0) {
points.push_back(Point2D(i,0,0.5,0));
} else {
double y = _nWeightedAntiBaryon[i]/_nWeightedBaryon[i];
double dy = sqrt( 1./_nAntiBaryon[i] + 1./_nBaryon[i] );
points.push_back(Point2D(i,y,0.5,y*dy));
}
}
_h_antibaryon_baryon_ratio->addPoints( points );
divide(_h_pT_lambdabar,_h_pT_lambda, _h_lambar_lam);
divide(_h_pT_xiplus,_h_pT_ximinus, _h_xiplus_ximinus);
- scale(_h_pT_k0s, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_kminus, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_kplus, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_lambda, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_lambdabar, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_ximinus, 1./(2*M_PI*_sumWeightSelected));
- scale(_h_pT_xiplus, 1./(2*M_PI*_sumWeightSelected));
+ const double factor = 1./(2*M_PI*double(_sumWeightSelected));
+ scale(_h_pT_k0s, factor);
+ scale(_h_pT_kminus, factor);
+ scale(_h_pT_kplus, factor);
+ scale(_h_pT_lambda, factor);
+ scale(_h_pT_lambdabar, factor);
+ scale(_h_pT_ximinus, factor);
+ scale(_h_pT_xiplus, factor);
//scale(_h_pT_omega, 1./(2*M_PI*_sumWeightSelected));
MSG_DEBUG("sumOfWeights() = " << sumOfWeights());
- MSG_DEBUG("_sumWeightSelected = " << _sumWeightSelected);
+ MSG_DEBUG("_sumWeightSelected = " << double(_sumWeightSelected));
}
private:
- double _sumWeightSelected;
- int _nBaryon[4];
- int _nAntiBaryon[4];
- double _nWeightedBaryon[4];
- double _nWeightedAntiBaryon[4];
+ CounterPtr _sumWeightSelected;
+ array<int,4> _nBaryon;
+ array<int,4> _nAntiBaryon;
+ array<CounterPtr, 4> _nWeightedBaryon;
+ array<CounterPtr, 4> _nWeightedAntiBaryon;
Histo1DPtr _h_pT_k0s, _h_pT_kminus, _h_pT_kplus, _h_pT_lambda, _h_pT_lambdabar, _h_pT_ximinus, _h_pT_xiplus;
//Histo1DPtr _h_pT_omega;
Scatter2DPtr _h_antibaryon_baryon_ratio;
Profile1DPtr _h_pT_vs_mass;
Scatter2DPtr _h_lambar_lam;
Scatter2DPtr _h_xiplus_ximinus;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2006_S6860818);
}
diff --git a/analyses/pluginRHIC/STAR_2006_S6870392.cc b/analyses/pluginRHIC/STAR_2006_S6870392.cc
--- a/analyses/pluginRHIC/STAR_2006_S6870392.cc
+++ b/analyses/pluginRHIC/STAR_2006_S6870392.cc
@@ -1,88 +1,86 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief STAR inclusive jet cross-section in pp at 200 GeV
class STAR_2006_S6870392 : public Analysis {
public:
/// Constructor
STAR_2006_S6870392()
: Analysis("STAR_2006_S6870392")
{ }
/// @name Analysis methods
//@{
/// Book projections and histograms
void init() {
FinalState fs(-2.0, 2.0);
declare(fs, "FS");
declare(FastJets(fs, FastJets::CDFMIDPOINT, 0.4,
JetAlg::ALL_MUONS, JetAlg::NO_INVISIBLES,
nullptr, 0.5), "MidpointJets");
book(_h_jet_pT_MB ,1, 1, 1);
book(_h_jet_pT_HT ,2, 1, 1);
}
/// Do the analysis
void analyze(const Event& event) {
- const double weight = 1.0;
-
// Skip if the event is empty
const FinalState& fs = apply<FinalState>(event, "FS");
if (fs.empty()) {
MSG_DEBUG("Skipping event " << numEvents() << " because no final state found ");
vetoEvent;
}
// Find jets
const FastJets& jetpro = apply<FastJets>(event, "MidpointJets");
const Jets& jets = jetpro.jetsByPt();
if (!jets.empty()) {
const Jet& j1 = jets.front();
if (inRange(fabs(j1.eta()), 0.2, 0.8)) {
foreach (const Jet& j, jets) {
const FourMomentum pj = j.momentum();
- _h_jet_pT_MB->fill(pj.pT(), weight);
- _h_jet_pT_HT->fill(pj.pT(), weight);
+ _h_jet_pT_MB->fill(pj.pT());
+ _h_jet_pT_HT->fill(pj.pT());
}
}
}
}
/// Finalize
void finalize() {
double normalisation = crossSection()/picobarn/sumOfWeights()/(2*0.6*2*M_PI);
scale(_h_jet_pT_MB, normalisation);
scale(_h_jet_pT_HT, normalisation);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_pT_MB;
Histo1DPtr _h_jet_pT_HT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2006_S6870392);
}
diff --git a/analyses/pluginRHIC/STAR_2008_S7869363.cc b/analyses/pluginRHIC/STAR_2008_S7869363.cc
--- a/analyses/pluginRHIC/STAR_2008_S7869363.cc
+++ b/analyses/pluginRHIC/STAR_2008_S7869363.cc
@@ -1,168 +1,173 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/LossyFinalState.hh"
namespace Rivet {
/// @todo Replace with SmearedParticles
class STARRandomFilter {
public:
STARRandomFilter() { }
// Return true to throw away a particle
bool operator()(const Particle& p) {
/// @todo Use a better RNG?
size_t idx = int(floor(p.pT()/MeV/50));
if (idx > 11) idx = 11;
return (rand()/static_cast<double>(RAND_MAX) > _trkeff[idx]);
}
int compare(const STARRandomFilter& other) const {
return true;
}
private:
const static double _trkeff[12];
};
// Here we have the track reconstruction efficiencies for tracks with pT from 0 to 600 MeV
// in steps of 50 MeV. The efficiency is assumed to be 0.88 for pT >= 600 MeV
const double STARRandomFilter::_trkeff[12] = {0,0,0.38,0.72,0.78,0.81,0.82,0.84,0.85,0.86,0.87,0.88};
class STAR_2008_S7869363 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
STAR_2008_S7869363()
- : Analysis("STAR_2008_S7869363"),
- nCutsPassed(0),
- nPi(0), nPiPlus(0), nKaon(0), nKaonPlus(0), nProton(0), nAntiProton(0)
+ : Analysis("STAR_2008_S7869363")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
const ChargedFinalState cfs(-0.5, 0.5, 0.2*GeV);
const LossyFinalState<STARRandomFilter> lfs(cfs, STARRandomFilter());
declare(lfs, "FS");
book(_h_dNch ,1, 1, 1);
book(_h_dpT_Pi ,2, 1, 1);
book(_h_dpT_Piplus ,2, 1, 2);
book(_h_dpT_Kaon ,2, 1, 3);
book(_h_dpT_Kaonplus ,2, 1, 4);
book(_h_dpT_AntiProton ,2, 1, 5);
book(_h_dpT_Proton ,2, 1, 6);
+ book(nCutsPassed, "nCutsPassed");
+ book(nPi, "nPi");
+ book(nPiPlus, "nPiPlus");
+ book(nKaon, "nKaon");
+ book(nKaonPlus, "nKaonPlus");
+ book(nProton, "nProton");
+ book(nAntiProton, "nAntiProton");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const FinalState& charged = apply<FinalState>(event, "FS");
// Vertex reconstruction efficiencies as a function of charged multiplicity.
// For events with more than 23 reconstructed tracks the efficiency is 100%.
double vtxeffs[24] = { 0.000000,0.512667,0.739365,0.847131,0.906946,0.940922,0.959328,0.96997,
0.975838,0.984432,0.988311,0.990327,0.990758,0.995767,0.99412,0.992271,
0.996631,0.994802,0.99635,0.997384,0.998986,0.996441,0.994513,1.000000 };
double vtxeff = 1.0;
if (charged.particles().size() < 24) {
vtxeff = vtxeffs[charged.particles().size()];
}
- const double weight = vtxeff * 1.0;
+ const double weight = vtxeff;
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
double y = p.rapidity();
if (fabs(y) < 0.1) {
- nCutsPassed += weight;
+ nCutsPassed->fill(weight);
const PdgId id = p.pid();
switch (id) {
case -211:
_h_dpT_Pi->fill(pT, weight/(TWOPI*pT*0.2));
- nPi += weight;
+ nPi->fill(weight);
break;
case 211:
_h_dpT_Piplus->fill(pT, weight/(TWOPI*pT*0.2));
- nPiPlus += weight;
+ nPiPlus->fill(weight);
break;
case -321:
_h_dpT_Kaon->fill(pT, weight/(TWOPI*pT*0.2));
- nKaon += weight;
+ nKaon->fill(weight);
break;
case 321:
_h_dpT_Kaonplus->fill(pT, weight/(TWOPI*pT*0.2));
- nKaonPlus += weight;
+ nKaonPlus->fill(weight);
break;
case -2212:
_h_dpT_AntiProton->fill(pT, weight/(TWOPI*pT*0.2));
- nAntiProton += weight;
+ nAntiProton->fill(weight);
break;
case 2212:
_h_dpT_Proton->fill(pT, weight/(TWOPI*pT*0.2));
- nProton += weight;
+ nProton->fill(weight);
break;
}
}
else {
continue;
}
}
_h_dNch->fill(charged.particles().size(), weight);
}
/// Normalise histograms etc., after the run
void finalize() {
//double nTot = nPi + nPiPlus + nKaon + nKaonPlus + nProton + nAntiProton;
normalize(_h_dNch);
/// @todo Norm to data!
normalize(_h_dpT_Pi , 0.389825 );
normalize(_h_dpT_Piplus , 0.396025 );
normalize(_h_dpT_Kaon , 0.03897 );
normalize(_h_dpT_Kaonplus , 0.04046 );
normalize(_h_dpT_AntiProton, 0.0187255);
normalize(_h_dpT_Proton , 0.016511 );
}
private:
Histo1DPtr _h_dNch;
Histo1DPtr _h_dpT_Pi, _h_dpT_Piplus;
Histo1DPtr _h_dpT_Kaon, _h_dpT_Kaonplus;
Histo1DPtr _h_dpT_AntiProton, _h_dpT_Proton;
Profile1DPtr _h_pT_vs_Nch;
- double nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton;
+ CounterPtr nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2008_S7869363);
}
diff --git a/analyses/pluginRHIC/STAR_2008_S7993412.cc b/analyses/pluginRHIC/STAR_2008_S7993412.cc
--- a/analyses/pluginRHIC/STAR_2008_S7993412.cc
+++ b/analyses/pluginRHIC/STAR_2008_S7993412.cc
@@ -1,81 +1,79 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief STAR di-hadron correlations in d-Au at 200 GeV
class STAR_2008_S7993412 : public Analysis {
public:
STAR_2008_S7993412() : Analysis("STAR_2008_S7993412")
{
}
/// @name Analysis methods
//@{
/// Book projections and histograms
void init() {
ChargedFinalState fs(-1.0, 1.0, 1.0*GeV);
declare(fs, "FS");
book(_h_Y_jet_trigger ,1, 1, 1);
book(_h_Y_jet_associated ,2, 1, 1);
}
/// Do the analysis
void analyze(const Event& event) {
// Skip if the event is empty
const FinalState& fs = apply<FinalState>(event, "FS");
if (fs.empty()) {
MSG_DEBUG("Skipping event " << numEvents() << " because no final state found ");
vetoEvent;
}
- const double weight = 1.0;
-
foreach (const Particle& tp, fs.particles()) {
const double triggerpT = tp.pT();
if (triggerpT >= 2.0 && triggerpT < 5.0) {
int n_associated = 0;
foreach (const Particle& ap, fs.particles()) {
if (!inRange(ap.pT()/GeV, 1.5, triggerpT)) continue;
if (deltaPhi(tp.phi(), ap.phi()) > 1) continue;
if (fabs(tp.eta() - ap.eta()) > 1.75) continue;
n_associated += 1;
}
//const double dPhidEta = 2 * 2*1.75;
- //_h_Y_jet_trigger->fill(triggerpT, n_associated/dPhidEta, weight);
- _h_Y_jet_trigger->fill(triggerpT, n_associated, weight);
+ //_h_Y_jet_trigger->fill(triggerpT, n_associated/dPhidEta);
+ _h_Y_jet_trigger->fill(triggerpT, n_associated);
}
}
}
/// Finalize
void finalize() {
}
//@}
private:
/// @name Histograms
//@{
Profile1DPtr _h_Y_jet_trigger;
Profile1DPtr _h_Y_jet_associated;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2008_S7993412);
}
diff --git a/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc b/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc
--- a/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc
+++ b/analyses/pluginRHIC/STAR_2009_UE_HELEN.cc
@@ -1,168 +1,165 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/NeutralFinalState.hh"
#include "Rivet/Projections/MergedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "fastjet/SISConePlugin.hh"
namespace Rivet {
/// @brief STAR underlying event
/// @author Hendrik Hoeth
class STAR_2009_UE_HELEN : public Analysis {
public:
/// Constructor
STAR_2009_UE_HELEN()
: Analysis("STAR_2009_UE_HELEN")
{
}
/// @name Analysis methods
//@{
void init() {
// Charged final state, |eta|<1, pT>0.2GeV
const ChargedFinalState cfs(-1.0, 1.0, 0.2*GeV);
declare(cfs, "CFS");
// Neutral final state, |eta|<1, ET>0.2GeV (needed for the jets)
const NeutralFinalState nfs(-1.0, 1.0, 0.2*GeV);
declare(nfs, "NFS");
// STAR can't see neutrons and K^0_L
VetoedFinalState vfs(nfs);
vfs.vetoNeutrinos();
vfs.addVetoPairId(PID::K0L);
vfs.addVetoPairId(PID::NEUTRON);
declare(vfs, "VFS");
// Jets are reconstructed from charged and neutral particles,
// and the cuts are different (pT vs. ET), so we need to merge them.
const MergedFinalState jfs(cfs, vfs);
declare(jfs, "JFS");
// SISCone, R = 0.7, overlap_threshold = 0.75
declare(FastJets(jfs, FastJets::SISCONE, 0.7), "AllJets");
// Book histograms
book(_hist_pmaxnchg , 1, 1, 1);
book(_hist_pminnchg , 2, 1, 1);
book(_hist_anchg , 3, 1, 1);
}
// Do the analysis
void analyze(const Event& e) {
const FinalState& cfs = apply<ChargedFinalState>(e, "CFS");
if (cfs.particles().size() < 1) {
MSG_DEBUG("Failed multiplicity cut");
vetoEvent;
}
const Jets& alljets = apply<FastJets>(e, "AllJets").jetsByPt();
MSG_DEBUG("Total jet multiplicity = " << alljets.size());
// The jet acceptance region is |eta|<(1-R)=0.3 (with R = jet radius)
// Jets also must have a neutral energy fraction of < 0.7
Jets jets;
foreach (const Jet jet, alljets) {
if (jet.neutralEnergy()/jet.totalEnergy() < 0.7 &&
jet.abseta() < 0.3)
jets.push_back(jet);
}
// This analysis requires a di-jet like event.
// WARNING: There is more data in preparation, some of which
// does _not_ have this constraint!
if (jets.size() != 2) {
MSG_DEBUG("Failed jet multiplicity cut");
vetoEvent;
}
// The di-jet constraints in this analysis are:
// - 2 and only 2 jets in the acceptance region
// - delta(Phi) between the jets is > 150 degrees
// - Pt_awayjet/Pt_towards_jet > 0.7
if (deltaPhi(jets[0].phi(), jets[1].phi()) <= 5*PI/6 ||
jets[1].pT()/jets[0].pT() <= 0.7)
{
MSG_DEBUG("Failed di-jet criteria");
vetoEvent;
}
// Now lets start ...
const double jetphi = jets[0].phi();
const double jetpT = jets[0].pT();
- // Get the event weight
- const double weight = 1.0;
-
size_t numTrans1(0), numTrans2(0), numAway(0);
// Calculate all the charged stuff
foreach (const Particle& p, cfs.particles()) {
const double dPhi = deltaPhi(p.phi(), jetphi);
const double pT = p.pT();
const double phi = p.phi();
double rotatedphi = phi - jetphi;
while (rotatedphi < 0) rotatedphi += 2*PI;
// @TODO: WARNING: The following lines are a hack to correct
// for the STAR tracking efficiency. Once we have the
// final numbers (corrected to hadron level), we need
// to remove this!!!!
if (1.0*rand()/static_cast<double>(RAND_MAX) > 0.87834-exp(-1.48994-0.788432*pT)) {
continue;
}
// -------- end of efficiency hack -------
if (dPhi < PI/3.0) {
// toward
}
else if (dPhi < 2*PI/3.0) {
if (rotatedphi <= PI) {
++numTrans1;
}
else {
++numTrans2;
}
}
else {
++numAway;
}
} // end charged particle loop
// Fill the histograms
- _hist_pmaxnchg->fill(jetpT, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_pminnchg->fill(jetpT, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3), weight);
- _hist_anchg->fill(jetpT, numAway/(PI*0.7*0.7), weight); // jet area = pi*R^2
+ _hist_pmaxnchg->fill(jetpT, (numTrans1>numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_pminnchg->fill(jetpT, (numTrans1<numTrans2 ? numTrans1 : numTrans2)/(2*PI/3));
+ _hist_anchg->fill(jetpT, numAway/(PI*0.7*0.7)); // jet area = pi*R^2
}
void finalize() {
//
}
//@}
private:
Profile1DPtr _hist_pmaxnchg;
Profile1DPtr _hist_pminnchg;
Profile1DPtr _hist_anchg;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2009_UE_HELEN);
}
diff --git a/analyses/pluginSPS/UA1_1990_S2044935.cc b/analyses/pluginSPS/UA1_1990_S2044935.cc
--- a/analyses/pluginSPS/UA1_1990_S2044935.cc
+++ b/analyses/pluginSPS/UA1_1990_S2044935.cc
@@ -1,177 +1,176 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
namespace Rivet {
/// @brief UA1 minbias track multiplicities, \f$ p_\perp \f$ and \f$ E_\perp \f$
class UA1_1990_S2044935 : public Analysis {
public:
/// Constructor
UA1_1990_S2044935() : Analysis("UA1_1990_S2044935") {
- _sumwTrig = 0;
- _sumwTrig08 = 0;
- _sumwTrig40 = 0;
- _sumwTrig80 = 0;
}
/// @name Analysis methods
//@{
/// Book projections and histograms
void init() {
declare(ChargedFinalState(-5.5, 5.5), "TriggerFS");
declare(ChargedFinalState(-2.5, 2.5), "TrackFS");
const FinalState trkcalofs(-2.5, 2.5);
declare(MissingMomentum(trkcalofs), "MET25");
const FinalState calofs(-6.0, 6.0);
declare(MissingMomentum(calofs), "MET60");
if (fuzzyEquals(sqrtS()/GeV, 63)) {
book(_hist_Pt ,8,1,1);
} else if (fuzzyEquals(sqrtS()/GeV, 200)) {
book(_hist_Nch ,1,1,1);
book(_hist_Esigd3p ,2,1,1);
book(_hist_Pt ,6,1,1);
book(_hist_Et ,9,1,1);
book(_hist_Etavg ,12,1,1);
} else if (fuzzyEquals(sqrtS()/GeV, 500)) {
book(_hist_Nch ,1,1,2);
book(_hist_Esigd3p ,2,1,2);
book(_hist_Et ,10,1,1);
book(_hist_Etavg ,12,1,2);
} else if (fuzzyEquals(sqrtS()/GeV, 900)) {
book(_hist_Nch ,1,1,3);
book(_hist_Esigd3p ,2,1,3);
book(_hist_Pt ,7,1,1);
book(_hist_Et ,11,1,1);
book(_hist_Etavg ,12,1,3);
book(_hist_Esigd3p08 ,3,1,1);
book(_hist_Esigd3p40 ,4,1,1);
book(_hist_Esigd3p80 ,5,1,1);
}
-
+ book(_sumwTrig, "TMP/sumwTrig");
+ book(_sumwTrig08, "TMP/sumwTrig08");
+ book(_sumwTrig40, "TMP/sumwTrig40");
+ book(_sumwTrig80, "TMP/sumwTrig80");
+
}
void analyze(const Event& event) {
// Trigger
const FinalState& trigfs = apply<FinalState>(event, "TriggerFS");
unsigned int n_minus(0), n_plus(0);
foreach (const Particle& p, trigfs.particles()) {
const double eta = p.eta();
if (inRange(eta, -5.5, -1.5)) n_minus++;
else if (inRange(eta, 1.5, 5.5)) n_plus++;
}
MSG_DEBUG("Trigger -: " << n_minus << ", Trigger +: " << n_plus);
if (n_plus == 0 || n_minus == 0) vetoEvent;
- const double weight = 1.0;
- _sumwTrig += weight;
+ _sumwTrig->fill();
// Use good central detector tracks
const FinalState& cfs = apply<FinalState>(event, "TrackFS");
const double Et25 = apply<MissingMomentum>(event, "MET25").scalarEt();
const double Et60 = apply<MissingMomentum>(event, "MET60").scalarEt();
const unsigned int nch = cfs.size();
// Event level histos
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
- _hist_Nch->fill(nch, weight);
- _hist_Et->fill(Et60/GeV, weight);
- _hist_Etavg->fill(nch, Et25/GeV, weight);
+ _hist_Nch->fill(nch);
+ _hist_Et->fill(Et60/GeV);
+ _hist_Etavg->fill(nch, Et25/GeV);
}
// Particle/track level histos
const double deta = 2 * 5.0;
const double dphi = TWOPI;
const double dnch_deta = nch/deta;
foreach (const Particle& p, cfs.particles()) {
const double pt = p.pT();
- const double scaled_weight = weight/(deta*dphi*pt/GeV);
+ const double scaled_weight = 1.0/(deta*dphi*pt/GeV);
if (!fuzzyEquals(sqrtS()/GeV, 500, 1E-3)) {
- _hist_Pt->fill(nch, pt/GeV, weight);
+ _hist_Pt->fill(nch, pt/GeV);
}
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
_hist_Esigd3p->fill(pt/GeV, scaled_weight);
}
// Also fill for specific dn/deta ranges at 900 GeV
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
if (inRange(dnch_deta, 0.8, 4.0)) {
- _sumwTrig08 += weight;
+ _sumwTrig08 ->fill();
_hist_Esigd3p08->fill(pt/GeV, scaled_weight);
} else if (inRange(dnch_deta, 4.0, 8.0)) {
- _sumwTrig40 += weight;
+ _sumwTrig40 ->fill();
_hist_Esigd3p40->fill(pt/GeV, scaled_weight);
} else {
//MSG_WARNING(dnch_deta);
if (dnch_deta > 8.0) {
- _sumwTrig80 += weight;
+ _sumwTrig80 ->fill();
_hist_Esigd3p80->fill(pt/GeV, scaled_weight);
}
}
}
}
}
void finalize() {
if (_sumwTrig <= 0) {
MSG_WARNING("No events passed the trigger!");
return;
}
const double xsec = crossSectionPerEvent();
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
scale(_hist_Nch, 2*xsec/millibarn); ///< Factor of 2 for Nch bin widths?
scale(_hist_Esigd3p, xsec/millibarn);
scale(_hist_Et, xsec/millibarn);
}
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
// NB. Ref data is normalised to a fixed value not reproducible from MC.
const double scale08 = (_hist_Esigd3p08->bin(0).area() > 0) ?
0.933e5/_hist_Esigd3p08->bin(0).height() : 0;
scale(_hist_Esigd3p08, scale08);
const double scale40 = (_hist_Esigd3p40->bin(0).area() > 0) ?
1.369e5/_hist_Esigd3p40->bin(0).height() : 0;
scale(_hist_Esigd3p40, scale40);
const double scale80 = (_hist_Esigd3p80->bin(0).area() > 0) ?
1.657e5/_hist_Esigd3p80->bin(0).height() : 0;
scale(_hist_Esigd3p80, scale80);
}
}
//@}
private:
/// @name Weight counters
//@{
- double _sumwTrig, _sumwTrig08, _sumwTrig40, _sumwTrig80;
+ CounterPtr _sumwTrig, _sumwTrig08, _sumwTrig40, _sumwTrig80;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_Nch;
Histo1DPtr _hist_Esigd3p;
Histo1DPtr _hist_Esigd3p08;
Histo1DPtr _hist_Esigd3p40;
Histo1DPtr _hist_Esigd3p80;
Profile1DPtr _hist_Pt;
Profile1DPtr _hist_Etavg;
Histo1DPtr _hist_Et;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA1_1990_S2044935);
}
diff --git a/analyses/pluginSPS/UA5_1982_S875503.cc b/analyses/pluginSPS/UA5_1982_S875503.cc
--- a/analyses/pluginSPS/UA5_1982_S875503.cc
+++ b/analyses/pluginSPS/UA5_1982_S875503.cc
@@ -1,93 +1,93 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 multiplicity and \f$ \eta \f$ distributions
class UA5_1982_S875503 : public Analysis {
public:
/// Default constructor
UA5_1982_S875503() : Analysis("UA5_1982_S875503") {
- _sumWTrig = 0;
}
/// @name Analysis methods
//@{
/// Set up projections and book histos
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-3.5, 3.5), "CFS");
// Book histos based on pp or ppbar beams
if (beamIds().first == beamIds().second) {
book(_hist_nch ,2,1,1);
book(_hist_eta ,3,1,1);
} else {
book(_hist_nch ,2,1,2);
book(_hist_eta ,4,1,1);
}
+ book(_sumWTrig, "sumW");
+
}
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
// Get tracks
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
// Fill mean charged multiplicity histos
- _hist_nch->fill(_hist_nch->bin(0).xMid(), cfs.size()*weight);
+ _hist_nch->fill(_hist_nch->bin(0).xMid(), cfs.size());
// Iterate over all tracks and fill eta histograms
foreach (const Particle& p, cfs.particles()) {
const double eta = p.abseta();
- _hist_eta->fill(eta, weight);
+ _hist_eta->fill(eta);
}
}
void finalize() {
/// @todo Why the factor of 2 on Nch for ppbar?
if (beamIds().first == beamIds().second) {
scale(_hist_nch, 1.0/_sumWTrig);
} else {
scale(_hist_nch, 0.5/_sumWTrig);
}
scale(_hist_eta, 0.5/_sumWTrig);
}
//@}
private:
/// @name Counters
//@{
- double _sumWTrig;
+ CounterPtr _sumWTrig;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_nch;
Histo1DPtr _hist_eta;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1982_S875503);
}
diff --git a/analyses/pluginSPS/UA5_1986_S1583476.cc b/analyses/pluginSPS/UA5_1986_S1583476.cc
--- a/analyses/pluginSPS/UA5_1986_S1583476.cc
+++ b/analyses/pluginSPS/UA5_1986_S1583476.cc
@@ -1,121 +1,123 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 \f$ \eta \f$ distributions at 200 and 900 GeV
class UA5_1986_S1583476 : public Analysis {
public:
/// Constructor
UA5_1986_S1583476() : Analysis("UA5_1986_S1583476") {
- _sumWTrig = 0;
- _sumWTrigNSD = 0;
}
/// @name Analysis methods
//@{
/// Set up projections and histograms
void init() {
declare(TriggerUA5(), "Trigger");
declare(Beam(), "Beams");
declare(ChargedFinalState(-5.0, 5.0), "CFS50");
// Histograms
if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) {
book(_hist_eta_nsd ,1,1,1);
book(_hist_eta_inelastic ,1,1,2);
_hists_eta_nsd.resize(6);
for (int i = 1; i <= 6; ++i) {
- _sumWn += 0.0;
+ _sumWn.push_back({});
+ book(_sumWn.back(), "TMP/sumWn"+to_str(i));
book(_hists_eta_nsd[i-1],2,1,i);
}
} else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) {
book(_hist_eta_nsd ,1,1,3);
book(_hist_eta_inelastic ,1,1,4);
_hists_eta_nsd.resize(9);
for (int i = 1; i <= 9; ++i) {
- _sumWn += 0.0;
+ _sumWn.push_back({});
+ book(_sumWn.back(), "TMP/sumWn"+to_str(i));
book(_hists_eta_nsd[i-1],3,1,i);
}
}
+ book(_sumWTrig, "sumWtrig");
+ book(_sumWTrigNSD, "sumWtrigNSD");
+
}
/// Fill eta histograms (in Nch bins)
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.sdDecision()) vetoEvent;
const bool isNSD = trigger.nsdDecision();
// Get the index corresponding to the max Nch range histo/sum(w) vector index
const ChargedFinalState& cfs50 = apply<ChargedFinalState>(event, "CFS50");
const int numP = cfs50.size();
const int ni = (int)floor(static_cast<float>(numP-2)/10.0);
const int num_idx = min(ni, (int)_sumWn.size()-1);
MSG_TRACE("Multiplicity index: " << numP << " charged particles -> #" << num_idx);
// Update weights
- const double weight = 1.0;
- _sumWTrig += weight;
+ _sumWTrig->fill();
if (isNSD) {
- _sumWTrigNSD += weight;
- if (num_idx >= 0) _sumWn[num_idx] += weight;
+ _sumWTrigNSD->fill();
+ if (num_idx >= 0) _sumWn[num_idx]->fill();
}
// Fill histos
foreach (const Particle& p, cfs50.particles()) {
const double eta = p.abseta();
- _hist_eta_inelastic->fill(eta, weight);
+ _hist_eta_inelastic->fill(eta);
if (isNSD) {
- _hist_eta_nsd->fill(eta, weight);
- if (num_idx >= 0) _hists_eta_nsd[num_idx]->fill(eta, weight);
+ _hist_eta_nsd->fill(eta);
+ if (num_idx >= 0) _hists_eta_nsd[num_idx]->fill(eta);
}
}
}
/// Scale histos
void finalize() {
- MSG_DEBUG("sumW_NSD,inel = " << _sumWTrigNSD << ", " << _sumWTrig);
+ MSG_DEBUG("sumW_NSD,inel = " << double(_sumWTrigNSD) << ", " << double(_sumWTrig));
scale(_hist_eta_nsd, 0.5/_sumWTrigNSD);
scale(_hist_eta_inelastic, 0.5/_sumWTrig);
//
- MSG_DEBUG("sumW[n] = " << _sumWn);
for (size_t i = 0; i < _hists_eta_nsd.size(); ++i) {
+ MSG_DEBUG("sumW[n] = " << double(_sumWn[i]));
scale(_hists_eta_nsd[i], 0.5/_sumWn[i]);
}
}
private:
/// @name Weight counters
//@{
- double _sumWTrig;
- double _sumWTrigNSD;
- vector<double> _sumWn;
+ CounterPtr _sumWTrig;
+ CounterPtr _sumWTrigNSD;
+ vector<CounterPtr> _sumWn;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_eta_nsd;
Histo1DPtr _hist_eta_inelastic;
vector<Histo1DPtr> _hists_eta_nsd;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1986_S1583476);
}
diff --git a/analyses/pluginSPS/UA5_1987_S1640666.cc b/analyses/pluginSPS/UA5_1987_S1640666.cc
--- a/analyses/pluginSPS/UA5_1987_S1640666.cc
+++ b/analyses/pluginSPS/UA5_1987_S1640666.cc
@@ -1,73 +1,72 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/TriggerUA5.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class UA5_1987_S1640666 : public Analysis {
public:
/// Constructor
UA5_1987_S1640666()
: Analysis("UA5_1987_S1640666")
{
- _sumWPassed = 0;
}
/// Book histograms and initialise projections before the run
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-5.0, 5.0), "CFS");
book(_hist_mean_nch ,1, 1, 1);
book(_hist_nch ,3, 1, 1);
+ book(_sumWPassed, "SumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
- const double weight = 1.0;
- _sumWPassed += weight;
+ _sumWPassed->fill();
// Count final state particles in several eta regions
const int Nch = apply<ChargedFinalState>(event, "CFS").size();
// Fill histograms
- _hist_nch->fill(Nch, weight);
- _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), Nch*weight);
+ _hist_nch->fill(Nch);
+ _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), Nch);
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_hist_nch, 1.0/_sumWPassed);
scale(_hist_mean_nch, 1.0/_sumWPassed);
}
private:
- double _sumWPassed;
+ CounterPtr _sumWPassed;
Histo1DPtr _hist_mean_nch;
Histo1DPtr _hist_nch;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1987_S1640666);
}
diff --git a/analyses/pluginSPS/UA5_1988_S1867512.cc b/analyses/pluginSPS/UA5_1988_S1867512.cc
--- a/analyses/pluginSPS/UA5_1988_S1867512.cc
+++ b/analyses/pluginSPS/UA5_1988_S1867512.cc
@@ -1,193 +1,195 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
namespace {
/// @brief Helper function to fill correlation points into scatter plot
Point2D correlation_helper(double x, double xerr, const vector<int> & nf, const vector<int> & nb, double sumWPassed) {
return Point2D(x, correlation(nf, nb), xerr, correlation_err(nf, nb)/sqrt(sumWPassed));
}
}
/// @brief UA5 charged particle correlations at 200, 546 and 900 GeV
class UA5_1988_S1867512 : public Analysis {
public:
UA5_1988_S1867512()
- : Analysis("UA5_1988_S1867512"), _sumWPassed(0)
+ : Analysis("UA5_1988_S1867512")
{ }
/// @name Analysis methods
//@{
void init() {
// Projections
declare(TriggerUA5(), "Trigger");
// Symmetric eta interval
declare(ChargedFinalState(-0.5, 0.5), "CFS05");
// Asymmetric intervals first
// Forward eta intervals
declare(ChargedFinalState(0.0, 1.0), "CFS10F");
declare(ChargedFinalState(0.5, 1.5), "CFS15F");
declare(ChargedFinalState(1.0, 2.0), "CFS20F");
declare(ChargedFinalState(1.5, 2.5), "CFS25F");
declare(ChargedFinalState(2.0, 3.0), "CFS30F");
declare(ChargedFinalState(2.5, 3.5), "CFS35F");
declare(ChargedFinalState(3.0, 4.0), "CFS40F");
// Backward eta intervals
declare(ChargedFinalState(-1.0, 0.0), "CFS10B");
declare(ChargedFinalState(-1.5, -0.5), "CFS15B");
declare(ChargedFinalState(-2.0, -1.0), "CFS20B");
declare(ChargedFinalState(-2.5, -1.5), "CFS25B");
declare(ChargedFinalState(-3.0, -2.0), "CFS30B");
declare(ChargedFinalState(-3.5, -2.5), "CFS35B");
declare(ChargedFinalState(-4.0, -3.0), "CFS40B");
// Histogram booking, we have sqrt(s) = 200, 546 and 900 GeV
// TODO use Scatter2D to be able to output errors
if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) {
book(_hist_correl, 2, 1, 1);
book(_hist_correl_asym, 3, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 546.0, 1E-4)) {
book(_hist_correl, 2, 1, 2);
book(_hist_correl_asym, 3, 1, 2);
} else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) {
book(_hist_correl, 2, 1, 3);
book(_hist_correl_asym, 3, 1, 3);
}
+
+ book(_sumWPassed, "sumW");
}
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerUA5>(event, "Trigger").nsdDecision();
if (!trigger) vetoEvent;
- _sumWPassed += 1.0;
+ _sumWPassed->fill();
// Count forward/backward particles
n_10f.push_back(apply<ChargedFinalState>(event, "CFS10F").size());
n_15f.push_back(apply<ChargedFinalState>(event, "CFS15F").size());
n_20f.push_back(apply<ChargedFinalState>(event, "CFS20F").size());
n_25f.push_back(apply<ChargedFinalState>(event, "CFS25F").size());
n_30f.push_back(apply<ChargedFinalState>(event, "CFS30F").size());
n_35f.push_back(apply<ChargedFinalState>(event, "CFS35F").size());
n_40f.push_back(apply<ChargedFinalState>(event, "CFS40F").size());
//
n_10b.push_back(apply<ChargedFinalState>(event, "CFS10B").size());
n_15b.push_back(apply<ChargedFinalState>(event, "CFS15B").size());
n_20b.push_back(apply<ChargedFinalState>(event, "CFS20B").size());
n_25b.push_back(apply<ChargedFinalState>(event, "CFS25B").size());
n_30b.push_back(apply<ChargedFinalState>(event, "CFS30B").size());
n_35b.push_back(apply<ChargedFinalState>(event, "CFS35B").size());
n_40b.push_back(apply<ChargedFinalState>(event, "CFS40B").size());
//
n_05 .push_back(apply<ChargedFinalState>(event, "CFS05").size());
}
void finalize() {
// The correlation strength is defined in formulas
// 4.1 and 4.2
// Fill histos, gap width histo comes first
// * Set the errors as Delta b / sqrt(sumWPassed) with
// Delta b being the absolute uncertainty of b according to
// Gaussian error-propagation (linear limit) and assuming
// Poissonian uncertainties for the number of particles in
// the eta-intervals
//
// Define vectors to be able to fill Scatter2Ds
vector<Point2D> points;
// Fill the y-value vector
points.push_back(correlation_helper(0, 0.5, n_10f, n_10b, _sumWPassed));
points.push_back(correlation_helper(1, 0.5, n_15f, n_15b, _sumWPassed));
points.push_back(correlation_helper(2, 0.5, n_20f, n_20b, _sumWPassed));
points.push_back(correlation_helper(3, 0.5, n_25f, n_25b, _sumWPassed));
points.push_back(correlation_helper(4, 0.5, n_30f, n_30b, _sumWPassed));
points.push_back(correlation_helper(5, 0.5, n_35f, n_35b, _sumWPassed));
points.push_back(correlation_helper(6, 0.5, n_40f, n_40b, _sumWPassed));
// Fill the DPS
_hist_correl->addPoints(points);
// Fill gap-center histo (Fig 15)
//
// The first bin contains the c_str strengths of
// the gap size histo that has an eta gap of two
//
// Now do the other histo -- clear already defined vectors first
points.clear();
points.push_back(correlation_helper(0, 0.25, n_20f, n_20b, _sumWPassed));
points.push_back(correlation_helper(0.5, 0.25, n_25f, n_15b, _sumWPassed));
points.push_back(correlation_helper(1, 0.25, n_30f, n_10b, _sumWPassed));
points.push_back(correlation_helper(1.5, 0.25, n_35f, n_05 , _sumWPassed));
points.push_back(correlation_helper(2, 0.25, n_40f, n_10f, _sumWPassed));
// Fill in correlation strength for asymmetric intervals,
// see Tab. 5
// Fill the DPS
_hist_correl_asym->addPoints(points);
}
//@}
private:
/// @name Counters
//@{
- double _sumWPassed;
+ CounterPtr _sumWPassed;
//@}
/// @name Vectors for storing the number of particles in the different eta intervals per event.
/// @todo Is there a better way?
//@{
std::vector<int> n_10f;
std::vector<int> n_15f;
std::vector<int> n_20f;
std::vector<int> n_25f;
std::vector<int> n_30f;
std::vector<int> n_35f;
std::vector<int> n_40f;
//
std::vector<int> n_10b;
std::vector<int> n_15b;
std::vector<int> n_20b;
std::vector<int> n_25b;
std::vector<int> n_30b;
std::vector<int> n_35b;
std::vector<int> n_40b;
//
std::vector<int> n_05;
//@}
/// @name Histograms
//@{
// Symmetric eta intervals
Scatter2DPtr _hist_correl;
// For asymmetric eta intervals
Scatter2DPtr _hist_correl_asym;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1988_S1867512);
}
diff --git a/analyses/pluginSPS/UA5_1989_S1926373.cc b/analyses/pluginSPS/UA5_1989_S1926373.cc
--- a/analyses/pluginSPS/UA5_1989_S1926373.cc
+++ b/analyses/pluginSPS/UA5_1989_S1926373.cc
@@ -1,112 +1,110 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 min bias charged multiplicities in central \f$ \eta \f$ ranges
class UA5_1989_S1926373 : public Analysis {
public:
/// Constructor
UA5_1989_S1926373() : Analysis("UA5_1989_S1926373") {
- _sumWPassed = 0;
}
/// @name Analysis methods
//@{
/// Book histograms and projections
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-0.5, 0.5), "CFS05");
declare(ChargedFinalState(-1.5, 1.5), "CFS15");
declare(ChargedFinalState(-3.0, 3.0), "CFS30");
declare(ChargedFinalState(-5.0, 5.0), "CFS50");
// NB. _hist_nch and _hist_ncheta50 use the same data but different binning
if (fuzzyEquals(sqrtS()/GeV, 200, 1E-3)) {
book(_hist_nch ,1, 1, 1);
book(_hist_nch_eta05 ,3, 1, 1);
book(_hist_nch_eta15 ,4, 1, 1);
book(_hist_nch_eta30 ,5, 1, 1);
book(_hist_nch_eta50 ,6, 1, 1);
book(_hist_mean_nch ,11, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_hist_nch ,2, 1, 1);
book(_hist_nch_eta05 ,7, 1, 1);
book(_hist_nch_eta15 ,8, 1, 1);
book(_hist_nch_eta30 ,9, 1, 1);
book(_hist_nch_eta50 ,10, 1, 1);
book(_hist_mean_nch ,12, 1, 1);
}
-
+ book(_sumWPassed, "SumW");
/// @todo Moments of distributions
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
- const double weight = 1.0;
- _sumWPassed += weight;
+ _sumWPassed->fill();
// Count final state particles in several eta regions
const int numP05 = apply<ChargedFinalState>(event, "CFS05").size();
const int numP15 = apply<ChargedFinalState>(event, "CFS15").size();
const int numP30 = apply<ChargedFinalState>(event, "CFS30").size();
const int numP50 = apply<ChargedFinalState>(event, "CFS50").size();
// Fill histograms
- _hist_nch->fill(numP50, weight);
- _hist_nch_eta05->fill(numP05, weight);
- _hist_nch_eta15->fill(numP15, weight);
- _hist_nch_eta30->fill(numP30, weight);
- _hist_nch_eta50->fill(numP50, weight);
- _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), numP50*weight);
+ _hist_nch->fill(numP50);
+ _hist_nch_eta05->fill(numP05);
+ _hist_nch_eta15->fill(numP15);
+ _hist_nch_eta30->fill(numP30);
+ _hist_nch_eta50->fill(numP50);
+ _hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), numP50);
}
void finalize() {
scale(_hist_nch, 1.0/_sumWPassed);
scale(_hist_nch_eta05, 1.0/_sumWPassed);
scale(_hist_nch_eta15, 1.0/_sumWPassed);
scale(_hist_nch_eta30, 1.0/_sumWPassed);
scale(_hist_nch_eta50, 1.0/_sumWPassed);
scale(_hist_mean_nch, 1.0/_sumWPassed);
}
//@}
private:
/// @name Counters
//@{
- double _sumWPassed;
+ CounterPtr _sumWPassed;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_nch;
Histo1DPtr _hist_nch_eta05;
Histo1DPtr _hist_nch_eta15;
Histo1DPtr _hist_nch_eta30;
Histo1DPtr _hist_nch_eta50;
Histo1DPtr _hist_mean_nch;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1989_S1926373);
}
diff --git a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
--- a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
+++ b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
@@ -1,79 +1,78 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
class CMSTOTEM_2014_I1294140 : public Analysis {
public:
CMSTOTEM_2014_I1294140()
: Analysis("CMSTOTEM_2014_I1294140")
{ }
void init() {
ChargedFinalState cfs(-7.0, 7.0, 0.0*GeV);
declare(cfs, "CFS");
- _Nevt_after_cuts_or = 0;
- _Nevt_after_cuts_and = 0;
- _Nevt_after_cuts_xor = 0;
+ book(_Nevt_after_cuts_or, "Nevt_or");
+ book(_Nevt_after_cuts_and, "Nevt_and");
+ book(_Nevt_after_cuts_xor, "Nevt_xor");
if (fuzzyEquals(sqrtS(), 8000*GeV, 1E-3)) {
book(_h_dNch_dEta_OR ,1, 1, 1);
book(_h_dNch_dEta_AND ,2, 1, 1);
book(_h_dNch_dEta_XOR ,3, 1, 1);
}
}
void analyze(const Event& event) {
// Count forward and backward charged particles
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
int count_plus = 0, count_minus = 0;
foreach (const Particle& p, charged.particles()) {
if (inRange(p.eta(), 5.3, 6.5)) count_plus++;
if (inRange(p.eta(), -6.5, -5.3)) count_minus++;
}
// Cut combinations
const bool cutsor = (count_plus > 0 || count_minus > 0);
const bool cutsand = (count_plus > 0 && count_minus > 0);
const bool cutsxor = ( (count_plus > 0 && count_minus == 0) || (count_plus == 0 && count_minus > 0) );
// Increment counters and fill histos
- const double weight = 1.0;
- if (cutsor) _Nevt_after_cuts_or += weight;
- if (cutsand) _Nevt_after_cuts_and += weight;
- if (cutsxor) _Nevt_after_cuts_xor += weight;
+ if (cutsor) _Nevt_after_cuts_or ->fill();
+ if (cutsand) _Nevt_after_cuts_and ->fill();
+ if (cutsxor) _Nevt_after_cuts_xor ->fill();
foreach (const Particle& p, charged.particles()) {
- if (cutsor) _h_dNch_dEta_OR ->fill(p.abseta(), weight);
- if (cutsand) _h_dNch_dEta_AND->fill(p.abseta(), weight);
- if (cutsxor) _h_dNch_dEta_XOR->fill(p.abseta(), weight);
+ if (cutsor) _h_dNch_dEta_OR ->fill(p.abseta());
+ if (cutsand) _h_dNch_dEta_AND->fill(p.abseta());
+ if (cutsxor) _h_dNch_dEta_XOR->fill(p.abseta());
}
}
void finalize() {
scale(_h_dNch_dEta_OR, 0.5/_Nevt_after_cuts_or);
scale(_h_dNch_dEta_AND, 0.5/_Nevt_after_cuts_and);
scale(_h_dNch_dEta_XOR, 0.5/_Nevt_after_cuts_xor);
}
private:
Histo1DPtr _h_dNch_dEta_OR, _h_dNch_dEta_AND, _h_dNch_dEta_XOR;
- double _Nevt_after_cuts_or, _Nevt_after_cuts_and, _Nevt_after_cuts_xor;
+ CounterPtr _Nevt_after_cuts_or, _Nevt_after_cuts_and, _Nevt_after_cuts_xor;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(CMSTOTEM_2014_I1294140);
}
diff --git a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
--- a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
+++ b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
@@ -1,64 +1,62 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class TOTEM_2012_I1115294 : public Analysis {
public:
TOTEM_2012_I1115294()
: Analysis("TOTEM_2012_I1115294")
{ }
public:
void init() {
ChargedFinalState cfsm(-6.50, -5.35, 40.*MeV);
ChargedFinalState cfsp( 5.35, 6.50, 40.*MeV);
declare(cfsm, "CFSM");
declare(cfsp, "CFSP");
book(_h_eta ,1, 1, 1);
- _sumofweights = 0.;
+ book(_sumofweights, "sumofweights");
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ChargedFinalState cfsm = apply<ChargedFinalState>(event, "CFSM");
const ChargedFinalState cfsp = apply<ChargedFinalState>(event, "CFSP");
if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent;
- _sumofweights += weight;
+ _sumofweights->fill();
foreach (const Particle& p, cfsm.particles() + cfsp.particles()) {
- _h_eta->fill(p.abseta(), weight);
+ _h_eta->fill(p.abseta());
}
}
void finalize() {
scale(_h_eta, 1./(2*_sumofweights));
}
private:
- double _sumofweights;
+ CounterPtr _sumofweights;
Histo1DPtr _h_eta;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TOTEM_2012_I1115294);
}
diff --git a/analyses/pluginTOTEM/TOTEM_2012_I1220862.cc b/analyses/pluginTOTEM/TOTEM_2012_I1220862.cc
--- a/analyses/pluginTOTEM/TOTEM_2012_I1220862.cc
+++ b/analyses/pluginTOTEM/TOTEM_2012_I1220862.cc
@@ -1,57 +1,55 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// TOTEM elastic and total cross-section measurement
class TOTEM_2012_I1220862 : public Analysis {
public:
TOTEM_2012_I1220862()
: Analysis("TOTEM_2012_I1220862")
{ }
void init() {
declare(ChargedFinalState(), "CFS");
book(_hist_tlow ,1, 1, 1);
book(_hist_thigh ,2, 1, 1);
book(_hist_sigma ,3, 1, 1);
}
void analyze(const Event& event) {
- const double weight = 1.0;
-
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
if (cfs.size() > 2) MSG_WARNING("Final state includes more than two charged particles!");
- _hist_sigma->fill(sqrtS()/GeV, weight);
+ _hist_sigma->fill(sqrtS()/GeV);
for (const Particle& p : cfs.particles(Cuts::eta > 0)) { // && Cuts::pid == PID::PROTON)) {
if (p.pid() != PID::PROTON) continue;
const double t = sqr(p.pT());
- _hist_tlow->fill(t, weight);
- _hist_thigh->fill(t, weight);
+ _hist_tlow->fill(t);
+ _hist_thigh->fill(t);
}
}
void finalize() {
normalize(_hist_tlow, crossSection()/millibarn);
normalize(_hist_thigh, crossSection()/millibarn);
normalize(_hist_sigma, crossSection()/millibarn);
}
private:
Histo1DPtr _hist_tlow, _hist_thigh, _hist_sigma;
};
DECLARE_ALIASED_RIVET_PLUGIN(TOTEM_2012_I1220862, TOTEM_2012_002);
}
diff --git a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
--- a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
+++ b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
@@ -1,58 +1,58 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class TOTEM_2014_I1328627 : public Analysis {
public:
TOTEM_2014_I1328627()
: Analysis("TOTEM_2014_I1328627")
{ }
void init() {
ChargedFinalState cfsm(-7.0, -6.0, 0.0*GeV);
ChargedFinalState cfsp( 3.7, 4.8, 0.0*GeV);
declare(cfsm, "CFSM");
declare(cfsp, "CFSP");
book(_h_eta ,1, 1, 1);
- _sumofweights = 0.;
+ book(_sumofweights, "sumofweights");
}
void analyze(const Event& event) {
const ChargedFinalState cfsm = apply<ChargedFinalState>(event, "CFSM");
const ChargedFinalState cfsp = apply<ChargedFinalState>(event, "CFSP");
if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent;
- _sumofweights += 1.0;
+ _sumofweights->fill();
foreach (const Particle& p, cfsm.particles() + cfsp.particles()) {
- _h_eta->fill(p.abseta(), 1.0);
+ _h_eta->fill(p.abseta());
}
}
void finalize() {
scale(_h_eta, 1./_sumofweights);
}
private:
- double _sumofweights;
+ CounterPtr _sumofweights;
Histo1DPtr _h_eta;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TOTEM_2014_I1328627);
}
diff --git a/include/Rivet/Analyses/MC_ParticleAnalysis.hh b/include/Rivet/Analyses/MC_ParticleAnalysis.hh
--- a/include/Rivet/Analyses/MC_ParticleAnalysis.hh
+++ b/include/Rivet/Analyses/MC_ParticleAnalysis.hh
@@ -1,65 +1,67 @@
// -*- C++ -*-
#ifndef RIVET_MC_ParticleAnalysis_HH
#define RIVET_MC_ParticleAnalysis_HH
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
namespace Rivet {
/// @brief Base class providing common functionality for MC particle species validation analyses
class MC_ParticleAnalysis : public Analysis {
public:
/// Default constructor.
MC_ParticleAnalysis(const string& name,
size_t nparticles,
const string& particle_name);
/// @name Analysis methods
//@{
/// Bookings as usual, using the particle name specified by the derived class
virtual void init();
/// To be implemented by derived classes, making particle selection then calling _analyze
virtual void analyze(const Event& event) = 0;
/// Normalization, division, etc.
virtual void finalize();
/// For derived classes to call, passing the sorted particle collection that they wish to analyse
virtual void _analyze(const Event& event, const Particles& particles);
//@}
protected:
/// The number of particles for which histograms are to be initialised
size_t _nparts;
/// The name of the particle species/group being analysed
std::string _pname;
/// @name Histograms
//@{
std::vector<Histo1DPtr> _h_pt;
std::vector<Histo1DPtr> _h_eta;
std::vector<Histo1DPtr> _h_eta_plus, _h_eta_minus;
std::vector<Histo1DPtr> _h_rap;
std::vector<Histo1DPtr> _h_rap_plus, _h_rap_minus;
+ std::vector<Scatter2DPtr> tmpeta, tmprap;
+
std::map<std::pair<size_t, size_t>, Histo1DPtr> _h_deta;
std::map<std::pair<size_t, size_t>, Histo1DPtr> _h_dphi;
std::map<std::pair<size_t, size_t>, Histo1DPtr> _h_dR;
Histo1DPtr _h_multi_exclusive, _h_multi_inclusive;
Histo1DPtr _h_multi_exclusive_prompt, _h_multi_inclusive_prompt;
Scatter2DPtr _h_multi_ratio, _h_multi_ratio_prompt;
//@}
};
}
#endif
diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh
--- a/include/Rivet/Analysis.hh
+++ b/include/Rivet/Analysis.hh
@@ -1,1067 +1,933 @@
// -*- C++ -*-
#ifndef RIVET_Analysis_HH
#define RIVET_Analysis_HH
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Event.hh"
#include "Rivet/Projection.hh"
#include "Rivet/ProjectionApplier.hh"
#include "Rivet/ProjectionHandler.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/RivetYODA.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Tools/ParticleUtils.hh"
#include "Rivet/Tools/Cuts.hh"
/// @def vetoEvent
/// Preprocessor define for vetoing events, including the log message and return.
#define vetoEvent \
do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0)
namespace Rivet {
// Forward declaration
class AnalysisHandler;
/// @brief This is the base class of all analysis classes in Rivet.
///
/// There are
/// three virtual functions which should be implemented in base classes:
///
/// void init() is called by Rivet before a run is started. Here the
/// analysis class should book necessary histograms. The needed
/// projections should probably rather be constructed in the
/// constructor.
///
/// void analyze(const Event&) is called once for each event. Here the
/// analysis class should apply the necessary Projections and fill the
/// histograms.
///
/// void finalize() is called after a run is finished. Here the analysis
/// class should do whatever manipulations are necessary on the
/// histograms. Writing the histograms to a file is, however, done by
/// the Rivet class.
class Analysis : public ProjectionApplier {
/// The AnalysisHandler is a friend.
friend class AnalysisHandler;
public:
/// @name Standard constructors and destructors.
//@{
// /// The default constructor.
// Analysis();
/// Constructor
Analysis(const std::string& name);
/// The destructor.
virtual ~Analysis() {}
//@}
public:
/// @name Main analysis methods
//@{
/// Initialize this analysis object. A concrete class should here
/// book all necessary histograms. An overridden function must make
/// sure it first calls the base class function.
virtual void init() { }
/// Analyze one event. A concrete class should here apply the
/// necessary projections on the \a event and fill the relevant
/// histograms. An overridden function must make sure it first calls
/// the base class function.
virtual void analyze(const Event& event) = 0;
/// Finalize this analysis object. A concrete class should here make
/// all necessary operations on the histograms. Writing the
/// histograms to a file is, however, done by the Rivet class. An
/// overridden function must make sure it first calls the base class
/// function.
virtual void finalize() { }
//@}
public:
/// @name Metadata
/// Metadata is used for querying from the command line and also for
/// building web pages and the analysis pages in the Rivet manual.
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored.
const AnalysisInfo& info() const {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
/// @brief Get the name of the analysis.
///
/// By default this is computed by combining the results of the experiment,
/// year and Spires ID metadata methods and you should only override it if
/// there's a good reason why those won't work.
virtual std::string name() const {
return (info().name().empty()) ? _defaultname : info().name();
}
/// Get the Inspire ID code for this analysis.
virtual std::string inspireId() const {
return info().inspireId();
}
/// Get the SPIRES ID code for this analysis (~deprecated).
virtual std::string spiresId() const {
return info().spiresId();
}
/// @brief Names & emails of paper/analysis authors.
///
/// Names and email of authors in 'NAME \<EMAIL\>' format. The first
/// name in the list should be the primary contact person.
virtual std::vector<std::string> authors() const {
return info().authors();
}
/// @brief Get a short description of the analysis.
///
/// Short (one sentence) description used as an index entry.
/// Use @a description() to provide full descriptive paragraphs
/// of analysis details.
virtual std::string summary() const {
return info().summary();
}
/// @brief Get a full description of the analysis.
///
/// Full textual description of this analysis, what it is useful for,
/// what experimental techniques are applied, etc. Should be treated
/// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
/// with equations to be rendered as LaTeX with amsmath operators.
virtual std::string description() const {
return info().description();
}
/// @brief Information about the events needed as input for this analysis.
///
/// Event types, energies, kinematic cuts, particles to be considered
/// stable, etc. etc. Should be treated as a restructuredText bullet list
/// (http://docutils.sourceforge.net/rst.html)
virtual std::string runInfo() const {
return info().runInfo();
}
/// Experiment which performed and published this analysis.
virtual std::string experiment() const {
return info().experiment();
}
/// Collider on which the experiment ran.
virtual std::string collider() const {
return info().collider();
}
/// When the original experimental analysis was published.
virtual std::string year() const {
return info().year();
}
/// The luminosity in inverse femtobarn
virtual std::string luminosityfb() const {
return info().luminosityfb();
}
/// Journal, and preprint references.
virtual std::vector<std::string> references() const {
return info().references();
}
/// BibTeX citation key for this article.
virtual std::string bibKey() const {
return info().bibKey();
}
/// BibTeX citation entry for this article.
virtual std::string bibTeX() const {
return info().bibTeX();
}
/// Whether this analysis is trusted (in any way!)
virtual std::string status() const {
return (info().status().empty()) ? "UNVALIDATED" : info().status();
}
/// Any work to be done on this analysis.
virtual std::vector<std::string> todos() const {
return info().todos();
}
/// Return the allowed pairs of incoming beams required by this analysis.
virtual const std::vector<PdgIdPair>& requiredBeams() const {
return info().beams();
}
/// Declare the allowed pairs of incoming beams required by this analysis.
virtual Analysis& setRequiredBeams(const std::vector<PdgIdPair>& requiredBeams) {
info().setBeams(requiredBeams);
return *this;
}
/// Sets of valid beam energy pairs, in GeV
virtual const std::vector<std::pair<double, double> >& requiredEnergies() const {
return info().energies();
}
/// Get vector of analysis keywords
virtual const std::vector<std::string> & keywords() const {
return info().keywords();
}
/// Declare the list of valid beam energy pairs, in GeV
virtual Analysis& setRequiredEnergies(const std::vector<std::pair<double, double> >& requiredEnergies) {
info().setEnergies(requiredEnergies);
return *this;
}
/// Return true if this analysis needs to know the process cross-section.
/// @todo Remove this and require HepMC >= 2.06
bool needsCrossSection() const {
return info().needsCrossSection();
}
/// Declare whether this analysis needs to know the process cross-section from the generator.
/// @todo Remove this and require HepMC >= 2.06
Analysis& setNeedsCrossSection(bool needed=true) {
info().setNeedsCrossSection(needed);
return *this;
}
//@}
/// @name Internal metadata modifying methods
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored (non-const).
AnalysisInfo& info() {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
//@}
/// @name Run conditions
//@{
/// Incoming beams for this run
const ParticlePair& beams() const;
/// Incoming beam IDs for this run
const PdgIdPair beamIds() const;
/// Centre of mass energy for this run
double sqrtS() const;
//@}
/// @name Analysis / beam compatibility testing
//@{
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const ParticlePair& beams) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const PdgIdPair& beams, const std::pair<double,double>& energies) const;
//@}
/// Set the cross section from the generator
Analysis& setCrossSection(double xs);
/// Access the controlling AnalysisHandler object.
/// @note Dereferences the stored handler pointer without a null check —
/// assumes the analysis has been registered with a handler first.
AnalysisHandler& handler() const { return *_analysishandler; }
protected:
/// Get a Log object based on the name() property of the calling analysis object.
Log& getLog() const;
/// Get the process cross-section in pb. Throws if this hasn't been set.
double crossSection() const;
/// Get the process cross-section per generated event in pb. Throws if this
/// hasn't been set.
double crossSectionPerEvent() const;
/// Get the number of events seen (via the analysis handler). Use in the
/// finalize phase only.
size_t numEvents() const;
/// Get the sum of event weights seen (via the analysis handler). Use in the
/// finalize phase only.
double sumOfWeights() const;
protected:
/// @name Histogram paths
//@{
/// Get the canonical histogram "directory" path for this analysis.
const std::string histoDir() const;
/// Get the canonical histogram path for the named histogram in this analysis.
const std::string histoPath(const std::string& hname) const;
/// Get the canonical histogram path for the numbered histogram in this analysis.
const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
/// Get the internal histogram name for given d, x and y (cf. HepData)
const std::string makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
//@}
/// @name Histogram reference data
//@{
/// Get reference data for a named histo
///
/// Looks the object up in the cached reference-data map and throws a
/// Rivet::Exception if it is absent or null.
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(const string& hname) const {
  _cacheRefData();
  MSG_TRACE("Using histo bin edges for " << name() << ":" << hname);
  // Single find() instead of repeated operator[] lookups: avoids three map
  // searches and, on a miss, avoids default-inserting an empty entry into
  // the cache before throwing.
  const auto it = _refdata.find(hname);
  if (it == _refdata.end() || !it->second) {
    MSG_ERROR("Can't find reference histogram " << hname);
    throw Exception("Reference data " + hname + " not found.");
  }
  return dynamic_cast<T&>(*it->second);
}
/// Get reference data for a numbered histo
///
/// Builds the HepData-style axis code from the dataset and axis IDs, then
/// defers to the by-name overload.
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
  const string hname = makeAxisCode(datasetId, xAxisId, yAxisId);
  // Must forward <T> explicitly: a bare refData(hname) cannot deduce T and
  // would always pick the default T=Scatter2D, breaking other instantiations.
  return refData<T>(hname);
}
//@}
/// @name Counter booking
//@{
/// Book a counter.
CounterPtr & book(CounterPtr &, const std::string& name,
const std::string& title="");
// const std::string& valtitle=""
/// Book a counter, using a path generated from the dataset and axis ID codes
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
CounterPtr & book(CounterPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="");
// const std::string& valtitle=""
//@}
/// @name 1D histogram booking
//@{
/// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with binning from a reference scatter.
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Histo1DPtr & book(Histo1DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D histogram booking
//@{
/// Book a 2D histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a
/// ylower - @a yupper respectively along the x- and y-axis.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
// /// Book a 2D histogram with binning from a reference scatter.
// Histo2DPtr bookHisto2D(const std::string& name,
// const Scatter3D& refscatter,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D histogram, using the binnings in the reference data histogram.
// Histo2DPtr bookHisto2D(const std::string& name,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D histogram, using the binnings in the reference data histogram.
// ///
// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
// Histo2DPtr bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
//@}
/// @name 1D profile histogram booking
//@{
/// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with binning from a reference scatter.
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Profile1DPtr & book(Profile1DPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D profile histogram booking
//@{
/// Book a 2D profile histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a ylower - @a
/// yupper respectively along the x- and y-axis.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectorx
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectorx
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with binning from a reference scatter.
// Profile2DPtr bookProfile2D(const std::string& name,
// const Scatter3D& refscatter,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// Profile2DPtr bookProfile2D(const std::string& name,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// ///
// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
// Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
//@}
/// @name 2D scatter booking
//@{
/// @brief Book a 2-dimensional data point set with the given name.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed...
/// assuming that there is a reference histo with the same name: if there
/// isn't, an exception will be thrown.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed.
Scatter2DPtr & book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set with equally spaced x-points in a range.
///
/// The y values and errors will be set to 0.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
size_t npts, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges".
///
/// The y values and errors will be set to 0.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
const std::vector<double>& binedges,
const std::string& title,
const std::string& xtitle,
const std::string& ytitle);
//@}
public:
/// @name Analysis object manipulation
/// @todo Should really be protected: only public to keep BinnedHistogram happy for now...
//@{
/// Multiplicatively scale the given counter, @a cnt, by factor @s factor.
void scale(CounterPtr cnt, double factor);
/// Multiplicatively scale the given counters, @a cnts, by factor @s factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of CounterPtrs
void scale(const std::vector<CounterPtr>& cnts, double factor) {
for (auto& c : cnts) scale(c, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const CounterPtr (&cnts)[array_size], double factor) {
// for (size_t i = 0; i < std::extent<decltype(cnts)>::value; ++i) scale(cnts[i], factor);
for (auto& c : cnts) scale(c, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo1DPtr histo, double norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void normalize(const std::vector<Histo1DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo1DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @s factor.
void scale(Histo1DPtr histo, double factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @s factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void scale(const std::vector<Histo1DPtr>& histos, double factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo1DPtr (&histos)[array_size], double factor) {
for (auto& h : histos) scale(h, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo2DPtr histo, double norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void normalize(const std::vector<Histo2DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo2DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @s factor.
void scale(Histo2DPtr histo, double factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @s factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void scale(const std::vector<Histo2DPtr>& histos, double factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo2DPtr (&histos)[array_size], double factor) {
for (auto& h : histos) scale(h, factor);
}
/// Helper for counter division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const;
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const;
/// Helper for histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const;
/// Helper for profile histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const;
/// Helper for 2D histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const;
/// Helper for 2D histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division with raw YODA objects
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(Histo1DPtr h, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(const Histo1D& h, Scatter2DPtr s) const;
//@}
public:
/// List of registered analysis data objects
- const vector<reference_wrapper<MultiweightAOPtr>>& analysisObjects() const {
+ const vector<MultiweightAOPtr>& analysisObjects() const {
return _analysisobjects;
}
protected:
- /// @name Data object registration, retrieval, and removal
+ /// @name Data object registration, and removal
//@{
/// Register a data object in the histogram system
- void addAnalysisObject(MultiweightAOPtr & ao);
-
- /// @todo we need these separately since we *only* want to call this for scatters?
- void addAnalysisObject(const shared_ptr<Scatter1DPtr>& ao);
- void addAnalysisObject(const shared_ptr<Scatter2DPtr>& ao);
- void addAnalysisObject(const shared_ptr<Scatter3DPtr>& ao);
-
- /// Get a data object from the histogram system
- template <typename AOPtr=YODA::AnalysisObject>
- const AOPtr& getAnalysisObject(const std::string& name) const {
- for (MultiweightAOPtr & ao : analysisObjects()) {
- if (ao->path() == histoPath(name)) return dynamic_cast<const AOPtr&>(ao);
- }
- throw Exception("Data object " + histoPath(name) + " not found");
- }
-
- /// Get a data object from the histogram system (non-const)
- template <typename AOPtr=YODA::AnalysisObject>
- AOPtr& getAnalysisObject(const std::string& name) {
- for (MultiweightAOPtr & ao : analysisObjects()) {
- if (ao->path() == histoPath(name)) return dynamic_cast<AOPtr&>(ao);
- }
-
- throw Exception("Data object " + histoPath(name) + " not found");
- }
+ void addAnalysisObject(const MultiweightAOPtr & ao);
/// Unregister a data object from the histogram system (by name)
void removeAnalysisObject(const std::string& path);
/// Unregister a data object from the histogram system (by pointer)
- void removeAnalysisObject(const MultiweightAOPtr& ao);
-
- void removeAnalysisObject(const Scatter1DPtr& ao);
- void removeAnalysisObject(const Scatter2DPtr& ao);
- void removeAnalysisObject(const Scatter3DPtr& ao);
-
-
- /// Get a named Histo1D object from the histogram system
- const Histo1DPtr getHisto1D(const std::string& name) const {
- return getAnalysisObject<Histo1DPtr>(name);
- }
-
- /// Get a named Histo1D object from the histogram system (non-const)
- Histo1DPtr getHisto1D(const std::string& name) {
- return getAnalysisObject<Histo1DPtr>(name);
- }
-
- /// Get a Histo1D object from the histogram system by axis ID codes (non-const)
- const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
- return getAnalysisObject<Histo1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
- /// Get a Histo1D object from the histogram system by axis ID codes (non-const)
- Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
- return getAnalysisObject<Histo1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
-
- /// Get a named Histo2D object from the histogram system
- const Histo2DPtr getHisto2D(const std::string& name) const {
- return getAnalysisObject<Histo2DPtr>(name);
- }
-
- /// Get a named Histo2D object from the histogram system (non-const)
- Histo2DPtr getHisto2D(const std::string& name) {
- return getAnalysisObject<Histo2DPtr>(name);
- }
-
- /// Get a Histo2D object from the histogram system by axis ID codes (non-const)
- const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
- return getAnalysisObject<Histo2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
- /// Get a Histo2D object from the histogram system by axis ID codes (non-const)
- Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
- return getAnalysisObject<Histo2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
-
- /// Get a named Profile1D object from the histogram system
- const Profile1DPtr getProfile1D(const std::string& name) const {
- return getAnalysisObject<Profile1DPtr>(name);
- }
-
- /// Get a named Profile1D object from the histogram system (non-const)
- Profile1DPtr getProfile1D(const std::string& name) {
- return getAnalysisObject<Profile1DPtr>(name);
- }
-
- /// Get a Profile1D object from the histogram system by axis ID codes (non-const)
- const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
- return getAnalysisObject<Profile1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
- /// Get a Profile1D object from the histogram system by axis ID codes (non-const)
- Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
- return getAnalysisObject<Profile1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
-
- /// Get a named Profile2D object from the histogram system
- const Profile2DPtr getProfile2D(const std::string& name) const {
- return getAnalysisObject<Profile2DPtr>(name);
- }
-
- /// Get a named Profile2D object from the histogram system (non-const)
- Profile2DPtr getProfile2D(const std::string& name) {
- return getAnalysisObject<Profile2DPtr>(name);
- }
-
- /// Get a Profile2D object from the histogram system by axis ID codes (non-const)
- const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
- return getAnalysisObject<Profile2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
- /// Get a Profile2D object from the histogram system by axis ID codes (non-const)
- Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
- return getAnalysisObject<Profile2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
-
- /// Get a named Scatter2D object from the histogram system
- const Scatter2DPtr getScatter2D(const std::string& name) const {
- return getAnalysisObject<Scatter2DPtr>(name);
- }
-
- /// Get a named Scatter2D object from the histogram system (non-const)
- Scatter2DPtr getScatter2D(const std::string& name) {
- return getAnalysisObject<Scatter2DPtr>(name);
- }
-
- /// Get a Scatter2D object from the histogram system by axis ID codes (non-const)
- const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
- return getAnalysisObject<Scatter2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
-
- /// Get a Scatter2D object from the histogram system by axis ID codes (non-const)
- Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
- return getAnalysisObject<Scatter2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
- }
+ void removeAnalysisObject(const MultiweightAOPtr & ao);
//@}
private:
/// Name passed to constructor (used to find .info analysis data file, and as a fallback)
string _defaultname;
/// Pointer to analysis metadata object
unique_ptr<AnalysisInfo> _info;
/// Storage of all plot objects
/// @todo Make this a map for fast lookup by path?
- vector<reference_wrapper<MultiweightAOPtr>> _analysisobjects;
- vector<shared_ptr<AnalysisObjectPtr> > _scatters;
+ vector<MultiweightAOPtr> _analysisobjects;
/// @name Cross-section variables
//@{
double _crossSection;
bool _gotCrossSection;
//@}
/// The controlling AnalysisHandler object.
AnalysisHandler* _analysishandler;
/// Collection of cached refdata to speed up many autobookings: the
/// reference data file should only be read once.
mutable std::map<std::string, YODA::AnalysisObjectPtr> _refdata;
private:
/// @name Utility functions
//@{
/// Get the reference data for this paper and cache it.
void _cacheRefData() const;
//@}
/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
Analysis& operator=(const Analysis&);
};
}
// Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh
#include "Rivet/AnalysisBuilder.hh"
/// @def DECLARE_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism.
#define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname
/// @def DECLARE_ALIASED_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis.
// #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname ## ( ## #alias ## )
#define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias )
/// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
/// Preprocessor define to prettify the manky constructor with name string argument
#define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {}
/// @def DEFAULT_RIVET_ANALYSIS_CTOR
/// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
#define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname)
#endif
diff --git a/include/Rivet/AnalysisHandler.hh b/include/Rivet/AnalysisHandler.hh
--- a/include/Rivet/AnalysisHandler.hh
+++ b/include/Rivet/AnalysisHandler.hh
@@ -1,271 +1,271 @@
// -*- C++ -*-
#ifndef RIVET_RivetHandler_HH
#define RIVET_RivetHandler_HH
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Particle.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/RivetYODA.hh"
namespace Rivet {
// Forward declaration and smart pointer for Analysis
class Analysis;
typedef std::shared_ptr<Analysis> AnaHandle;
// Needed to make smart pointers compare equivalent in the STL set
// (ordering is by raw pointer identity, not by analysis content).
struct CmpAnaHandle {
  /// Strict weak ordering on the underlying raw pointers.
  /// Marked const so the comparator satisfies the standard-library
  /// Compare requirement and is usable through const containers.
  bool operator() (const AnaHandle& a, const AnaHandle& b) const {
    return a.get() < b.get();
  }
};
/// A class which handles a number of analysis objects to be applied to
/// generated events. An {@link Analysis}' AnalysisHandler is also responsible
/// for handling the final writing-out of histograms.
class AnalysisHandler {
public:
/// @name Constructors and destructors. */
//@{
/// Preferred constructor, with optional run name.
AnalysisHandler(const string& runname="");
/// @brief Destructor
/// The destructor is not virtual, as this class should not be inherited from.
~AnalysisHandler();
//@}
private:
/// Get a logger object.
Log& getLog() const;
public:
/// @name Run properties
//@{
/// Get the name of this run.
string runName() const;
/// Get the number of events seen. Should only really be used by external
/// steering code or analyses in the finalize phase.
size_t numEvents() const;
/// Get the sum of the event weights seen - the weighted equivalent of the
/// number of events. Should only really be used by external steering code
/// or analyses in the finalize phase.
double sumOfWeights() const {
return _eventCounter->sumW();
}
size_t numWeights() const {
return _weightNames.size();
}
/// Set the active weight.
void setActiveWeight(unsigned int iWeight);
/// Is cross-section information required by at least one child analysis?
bool needCrossSection() const;
/// Set the cross-section for the process being generated.
AnalysisHandler& setCrossSection(double xs);
/// Get the cross-section known to the handler.
double crossSection() const {
return _xs;
}
/// Whether the handler knows about a cross-section.
bool hasCrossSection() const;
/// Set the beam particles for this run
AnalysisHandler& setRunBeams(const ParticlePair& beams) {
_beams = beams;
MSG_DEBUG("Setting run beams = " << beams << " @ " << sqrtS()/GeV << " GeV");
return *this;
}
/// Get the beam particles for this run, usually determined from the first event.
const ParticlePair& beams() const { return _beams; }
/// Get beam IDs for this run, usually determined from the first event.
/// @deprecated Use standalone beamIds(ah.beams()), to clean AH interface
PdgIdPair beamIds() const;
/// Get energy for this run, usually determined from the first event.
/// @deprecated Use standalone sqrtS(ah.beams()), to clean AH interface
double sqrtS() const;
/// Setter for _ignoreBeams
void setIgnoreBeams(bool ignore=true);
//@}
/// @name Handle analyses
//@{
/// Get a list of the currently registered analyses' names.
std::vector<std::string> analysisNames() const;
/// Get the collection of currently registered analyses.
const std::set<AnaHandle, CmpAnaHandle>& analyses() const {
return _analyses;
}
/// Get a registered analysis by name.
const AnaHandle analysis(const std::string& analysisname) const;
/// Add an analysis to the run list by object
AnalysisHandler& addAnalysis(Analysis* analysis);
/// @brief Add an analysis to the run list using its name.
///
/// The actual Analysis to be used will be obtained via
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found,
/// no analysis is added (i.e. the null pointer is checked and discarded.
AnalysisHandler& addAnalysis(const std::string& analysisname);
/// @brief Add analyses to the run list using their names.
///
/// The actual {@link Analysis}' to be used will be obtained via
/// AnalysisHandler::addAnalysis(string), which in turn uses
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found
/// for a given name, no analysis is added, but also no error is thrown.
AnalysisHandler& addAnalyses(const std::vector<std::string>& analysisnames);
/// Remove an analysis from the run list using its name.
AnalysisHandler& removeAnalysis(const std::string& analysisname);
/// Remove analyses from the run list using their names.
AnalysisHandler& removeAnalyses(const std::vector<std::string>& analysisnames);
//@}
/// @name Main init/execute/finalise
//@{
/// Initialize a run, with the run beams taken from the example event.
void init(const GenEvent& event);
/// @brief Analyze the given \a event by reference.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects.
void analyze(const GenEvent& event);
/// @brief Analyze the given \a event by pointer.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects, after checking the event pointer validity.
void analyze(const GenEvent* event);
/// Finalize a run. This function calls the AnalysisBase::finalize()
/// functions of all included analysis objects.
void finalize();
//@}
/// @name Histogram / data object access
//@{
/// Add a vector of analysis objects to the current state.
void addData(const std::vector<YODA::AnalysisObjectPtr>& aos);
/// Read analysis plots into the histo collection (via addData) from the named file.
void readData(const std::string& filename);
/// Get all analyses' plots as a vector of analysis objects.
std::vector<YODA::AnalysisObjectPtr> getData() const;
- std::vector<reference_wrapper<MultiweightAOPtr> > getRivetAOs() const;
+ std::vector<MultiweightAOPtr> getRivetAOs() const;
std::vector<YODA::AnalysisObjectPtr> getYodaAOs() const;
/// Get all analyses' plots as a vector of analysis objects.
void setWeightNames(const GenEvent& ge);
/// Do we have named weights?
bool haveNamedWeights();
/// Write all analyses' plots (via getData) to the named file.
void writeData(const std::string& filename) const;
//@}
/// Indicate which Rivet stage we're in.
/// At the moment, only INIT is used to enable booking.
enum class Stage { OTHER, INIT };
/// Which stage are we in?
Stage stage() const { return _stage; }
private:
/// Current handler stage
Stage _stage = Stage::OTHER;
/// The collection of Analysis objects to be used.
set<AnaHandle, CmpAnaHandle> _analyses;
/// @name Run properties
//@{
/// Weight names
std::vector<std::string> _weightNames;
std::vector<std::valarray<double> > _subEventWeights;
size_t _numWeightTypes; // always == WeightVector.size()
/// Run name
std::string _runname;
mutable CounterPtr _eventCounter;
/// Cross-section known to AH
double _xs, _xserr;
/// Beams used by this run.
ParticlePair _beams;
/// Flag to check if init has been called
bool _initialised;
/// Flag whether input event beams should be ignored in compatibility check
bool _ignoreBeams;
/// Current event number
int _eventNumber;
//@}
private:
/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
AnalysisHandler& operator=(const AnalysisHandler&);
/// The copy constructor is private and must never be called. In
/// fact, it should not even be implemented.
AnalysisHandler(const AnalysisHandler&);
};
}
#endif
diff --git a/include/Rivet/Tools/RivetYODA.hh b/include/Rivet/Tools/RivetYODA.hh
--- a/include/Rivet/Tools/RivetYODA.hh
+++ b/include/Rivet/Tools/RivetYODA.hh
@@ -1,438 +1,490 @@
#ifndef RIVET_RIVETYODA_HH
#define RIVET_RIVETYODA_HH
/// @author Andy Buckley
/// @date 2009-01-30
/// @author David Grellscheid
/// @date 2011-07-18
/// @author David Grellscheid
/// @date 2016-09-27
#include "Rivet/Config/RivetCommon.hh"
#include "YODA/AnalysisObject.h"
#include "YODA/Counter.h"
#include "YODA/Histo1D.h"
#include "YODA/Histo2D.h"
#include "YODA/Profile1D.h"
#include "YODA/Profile2D.h"
#include "YODA/Scatter1D.h"
#include "YODA/Scatter2D.h"
#include "YODA/Scatter3D.h"
#include <map>
namespace YODA {
typedef std::shared_ptr<YODA::AnalysisObject> AnalysisObjectPtr;
// typedef std::shared_ptr<YODA::Scatter1D> Scatter1DPtr;
// typedef std::shared_ptr<YODA::Scatter2D> Scatter2DPtr;
// typedef std::shared_ptr<YODA::Scatter3D> Scatter3DPtr;
}
namespace Rivet {
- class AnalysisObjectPtr {
+ class AnalysisObjectWrapper {
public:
- virtual ~AnalysisObjectPtr() {}
+ virtual ~AnalysisObjectWrapper() {}
virtual YODA::AnalysisObject* operator->() = 0;
virtual YODA::AnalysisObject* operator->() const = 0;
virtual const YODA::AnalysisObject & operator*() const = 0;
/// @todo
/// rename to setActive(Idx)?
virtual void setActiveWeightIdx(unsigned int iWeight) = 0;
virtual void blockDestructor(bool) = 0;
- bool operator ==(const AnalysisObjectPtr& p) { return (this == &p); }
+ bool operator ==(const AnalysisObjectWrapper& p) { return (this == &p); }
protected:
/// @todo do we need this?
// virtual void reset() = 0;
};
/// @todo
/// implement scatter1dptr and scatter2dptr here
/// these need to be multi-weighted eventually.
/*
class Scatter1DPtr : public AnalysisObjectPtr {
public:
Scatter1DPtr() : _persistent() { }
Scatter1DPtr(size_t len_of_weightvec, const YODA::Scatter1D& p) {
for (size_t m = 0; m < len_of_weightvec; ++m)
_persistent.push_back(make_shared<YODA::Scatter1D>(p));
}
bool operator!() const { return !_persistent; }
operator bool() const { return bool(_persistent); }
YODA::Scatter1D* operator->() { return _persistent.get(); }
YODA::Scatter1D* operator->() const { return _persistent.get(); }
YODA::Scatter1D & operator*() { return *_persistent; }
const YODA::Scatter1D & operator*() const { return *_persistent; }
protected:
vector<YODA::Scatter1DPtr> _persistent;
};
class Scatter2DPtr : public AnalysisObjectPtr {
public:
Scatter2DPtr(size_t len_of_weightvec, const YODA::Scatter2D& p) {
for (size_t m = 0; m < len_of_weightvec; ++m)
_persistent.push_back(make_shared<YODA::Scatter2D>(p));
}
Scatter2DPtr() : _persistent() { }
bool operator!() { return !_persistent; }
operator bool() { return bool(_persistent); }
YODA::Scatter2D* operator->() { return _persistent.get(); }
YODA::Scatter2D* operator->() const { return _persistent.get(); }
YODA::Scatter2D & operator*() { return *_persistent; }
const YODA::Scatter2D & operator*() const { return *_persistent; }
protected:
vector<YODA::Scatter2DPtr> _persistent;
};
class Scatter3DPtr : public AnalysisObjectPtr {
public:
Scatter3DPtr(size_t len_of_weightvec, const YODA::Scatter3D& p) {
for (size_t m = 0; m < len_of_weightvec; ++m)
_persistent.push_back(make_shared<YODA::Scatter3D>(p));
}
Scatter3DPtr() : _persistent() { }
bool operator!() { return !_persistent; }
operator bool() { return bool(_persistent); }
YODA::Scatter3D* operator->() { return _persistent.get(); }
YODA::Scatter3D* operator->() const { return _persistent.get(); }
YODA::Scatter3D & operator*() { return *_persistent; }
const YODA::Scatter3D & operator*() const { return *_persistent; }
protected:
vector<YODA::Scatter3DPtr> _persistent;
};
*/
- class MultiweightAOPtr : public AnalysisObjectPtr {
+ class MultiweightAOWrapper : public AnalysisObjectWrapper {
public:
+ using Inner = YODA::AnalysisObject;
+
virtual void newSubEvent() = 0;
virtual void pushToPersistent(const vector<valarray<double> >& weight) = 0;
virtual YODA::AnalysisObjectPtr activeYODAPtr() const = 0;
};
using Weight = double;
template <class T>
using Fill = pair<typename T::FillType, Weight>;
template <class T>
using Fills = multiset<Fill<T>>;
// TODO TODO TODO
// need to override the old fill method too!
// otherwise we can't intercept existing fill calls in analysis code
// TODO TODO TODO
template <class T>
class TupleWrapper;
template<>
class TupleWrapper<YODA::Counter> : public YODA::Counter {
public:
typedef shared_ptr<TupleWrapper<YODA::Counter>> Ptr;
TupleWrapper(const YODA::Counter & h) : YODA::Counter(h) {}
// todo: do we need to deal with users using fractions directly?
void fill( double weight=1.0, double fraction=1.0 ) {
fills_.insert( {YODA::Counter::FillType(),weight} );
}
void reset() { fills_.clear(); }
const Fills<YODA::Counter> & fills() const { return fills_; }
private:
// x / weight pairs
Fills<YODA::Counter> fills_;
};
template<>
class TupleWrapper<YODA::Histo1D> : public YODA::Histo1D {
public:
typedef shared_ptr<TupleWrapper<YODA::Histo1D>> Ptr;
TupleWrapper(const YODA::Histo1D & h) : YODA::Histo1D(h) {}
// todo: do we need to deal with users using fractions directly?
void fill( double x, double weight=1.0, double fraction=1.0 ) {
if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
fills_.insert( { x , weight } );
}
void reset() { fills_.clear(); }
const Fills<YODA::Histo1D> & fills() const { return fills_; }
private:
// x / weight pairs
Fills<YODA::Histo1D> fills_;
};
template<>
class TupleWrapper<YODA::Profile1D> : public YODA::Profile1D {
public:
typedef shared_ptr<TupleWrapper<YODA::Profile1D>> Ptr;
TupleWrapper(const YODA::Profile1D & h) : YODA::Profile1D(h) {}
// todo: do we need to deal with users using fractions directly?
void fill( double x, double y, double weight=1.0, double fraction=1.0 ) {
if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
fills_.insert( { YODA::Profile1D::FillType{x,y}, weight } );
}
void reset() { fills_.clear(); }
const Fills<YODA::Profile1D> & fills() const { return fills_; }
private:
// x / weight pairs
Fills<YODA::Profile1D> fills_;
};
template<>
class TupleWrapper<YODA::Histo2D> : public YODA::Histo2D {
public:
typedef shared_ptr<TupleWrapper<YODA::Histo2D>> Ptr;
TupleWrapper(const YODA::Histo2D & h) : YODA::Histo2D(h) {}
// todo: do we need to deal with users using fractions directly?
void fill( double x, double y, double weight=1.0, double fraction=1.0 ) {
if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
fills_.insert( { YODA::Histo2D::FillType{x,y}, weight } );
}
void reset() { fills_.clear(); }
const Fills<YODA::Histo2D> & fills() const { return fills_; }
private:
// x / weight pairs
Fills<YODA::Histo2D> fills_;
};
template<>
class TupleWrapper<YODA::Profile2D> : public YODA::Profile2D {
public:
typedef shared_ptr<TupleWrapper<YODA::Profile2D>> Ptr;
TupleWrapper(const YODA::Profile2D & h) : YODA::Profile2D(h) {}
// todo: do we need to deal with users using fractions directly?
void fill( double x, double y, double z, double weight=1.0, double fraction=1.0 ) {
if ( std::isnan(x) ) throw YODA::RangeError("X is NaN");
if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN");
if ( std::isnan(z) ) throw YODA::RangeError("Z is NaN");
fills_.insert( { YODA::Profile2D::FillType{x,y,z}, weight } );
}
void reset() { fills_.clear(); }
const Fills<YODA::Profile2D> & fills() const { return fills_; }
private:
// x / weight pairs
Fills<YODA::Profile2D> fills_;
};
template<>
class TupleWrapper<YODA::Scatter1D> : public YODA::Scatter1D {
public:
typedef shared_ptr<TupleWrapper<YODA::Scatter1D>> Ptr;
TupleWrapper(const YODA::Scatter1D & h) : YODA::Scatter1D(h) {}
};
template<>
class TupleWrapper<YODA::Scatter2D> : public YODA::Scatter2D {
public:
typedef shared_ptr<TupleWrapper<YODA::Scatter2D>> Ptr;
TupleWrapper(const YODA::Scatter2D & h) : YODA::Scatter2D(h) {}
};
template<>
class TupleWrapper<YODA::Scatter3D> : public YODA::Scatter3D {
public:
typedef shared_ptr<TupleWrapper<YODA::Scatter3D>> Ptr;
TupleWrapper(const YODA::Scatter3D & h) : YODA::Scatter3D(h) {}
};
template <class T>
- class Wrapper : public MultiweightAOPtr {
+ class Wrapper : public MultiweightAOWrapper {
friend class Analysis;
public:
+ using Inner = T;
/* @todo
* some things are not really well-defined here
* for instance: fill() in the finalize() method and integral() in
* the analyze() method.
*/
Wrapper() = default;
Wrapper(size_t len_of_weightvec, const T & p);
~Wrapper();
typename T::Ptr active() const;
/* @todo this probably need to loop over all? */
bool operator!() const { return !_active; } // Don't use active() here, assert will catch
operator bool() const { return static_cast<bool>(_active); } // Don't use active() here, assert will catch
T * operator->() { return active().get(); }
T * operator->() const { return active().get(); }
T & operator*() { return *active(); }
const T & operator*() const { return *active(); }
/* @todo
* these need to be re-thought out.
void reset() { active()->reset(); }
*/
/* @todo
* these probably need to loop over all?
* do we even want to provide equality?
*/
/* @todo
* how about no.
friend bool operator==(Wrapper a, Wrapper b){
if (a._persistent.size() != b._persistent.size())
return false;
for (size_t i = 0; i < a._persistent.size(); i++) {
if (a._persistent.at(i) != b._persistent.at(i)) {
return false;
}
}
return true;
}
friend bool operator!=(Wrapper a, Wrapper b){
return !(a == b);
}
friend bool operator<(Wrapper a, Wrapper b){
if (a._persistent.size() >= b._persistent.size())
return false;
for (size_t i = 0; i < a._persistent.size(); i++) {
if (*(a._persistent.at(i)) >= *(b._persistent.at(i))) {
return false;
}
}
return true;
}
*/
private:
void setActiveWeightIdx(unsigned int iWeight) {
_active = _persistent.at(iWeight);
}
/* this is for dev only---we shouldn't need this in real runs. */
void unsetActiveWeight() { _active.reset(); }
void newSubEvent();
virtual YODA::AnalysisObjectPtr activeYODAPtr() const { return _active; }
const vector<typename T::Ptr> & persistent() const { return _persistent; }
/* to be implemented for each type */
void pushToPersistent(const vector<valarray<double> >& weight);
/// Set destructor blocking flag.
/// Once booked in an analysis, we need to block the destructor
/// from being called until we're done with finalize.
/// This gives a clearer indication to the users that they're booking with
/// temporary objects.
void blockDestructor(bool b) { _blockDestructor = b; }
/* M of these, one for each weight */
vector<typename T::Ptr> _persistent;
/* N of these, one for each event in evgroup */
vector<typename TupleWrapper<T>::Ptr> _evgroup;
typename T::Ptr _active;
/// Destructor blocking flag.
/// Once booked in an analysis, we need to block the destructor
/// from being called until we're done with finalize.
///
bool _blockDestructor = false;
// do we need implicit cast?
// operator typename T::Ptr () {
// return _active;
// }
friend class AnalysisHandler;
};
+/// We need our own shared_ptr class, so we can dispatch -> and *
+/// all the way down to the inner YODA analysis objects
+///
+/// TODO: provide remaining functionality that shared_ptr has (not needed right now)
+///
+template <typename T>
+class rivet_shared_ptr {
+public:
+ rivet_shared_ptr() = default;
+
+ /// Convenience constructor, pass through to the Wrapper constructor
+ rivet_shared_ptr(size_t len_of_weightvec, const typename T::Inner & p)
+ : _p( make_shared<T>(len_of_weightvec, p) )
+ {}
+
+ template <typename U>
+ rivet_shared_ptr(const shared_ptr<U> & p)
+ : _p(p)
+ {}
+
+ template <typename U>
+ rivet_shared_ptr(const rivet_shared_ptr<U> & p)
+ : _p(p.get())
+ {}
+
+ // Goes right through to the active YODA object's members
+ T & operator->() { return *_p; }
+ const T & operator->() const { return *_p; }
+
+ // The active YODA object
+ typename T::Inner & operator*() { return **_p; }
+ const typename T::Inner & operator*() const { return **_p; }
+
+ bool operator!() const { return !_p || !(*_p); }
+ operator bool() const { return _p && bool(*_p); }
+
+ template <typename U>
+ bool operator==(const rivet_shared_ptr<U> & other) const {
+ return _p == other._p;
+ }
+
+ shared_ptr<T> get() const { return _p; }
+private:
+ shared_ptr<T> _p;
+};
+
+
// every object listed here needs a virtual fill method in YODA,
// otherwise the Tuple fakery won't work.
- using Histo1DPtr = Wrapper<YODA::Histo1D>;
- using Histo2DPtr = Wrapper<YODA::Histo2D>;
- using Profile1DPtr = Wrapper<YODA::Profile1D>;
- using Profile2DPtr = Wrapper<YODA::Profile2D>;
- using CounterPtr = Wrapper<YODA::Counter>;
- using Scatter1DPtr = Wrapper<YODA::Scatter1D>;
- using Scatter2DPtr = Wrapper<YODA::Scatter2D>;
- using Scatter3DPtr = Wrapper<YODA::Scatter3D>;
+ using MultiweightAOPtr = rivet_shared_ptr<MultiweightAOWrapper>;
+
+ using Histo1DPtr = rivet_shared_ptr<Wrapper<YODA::Histo1D>>;
+ using Histo2DPtr = rivet_shared_ptr<Wrapper<YODA::Histo2D>>;
+ using Profile1DPtr = rivet_shared_ptr<Wrapper<YODA::Profile1D>>;
+ using Profile2DPtr = rivet_shared_ptr<Wrapper<YODA::Profile2D>>;
+ using CounterPtr = rivet_shared_ptr<Wrapper<YODA::Counter>>;
+ using Scatter1DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter1D>>;
+ using Scatter2DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter2D>>;
+ using Scatter3DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter3D>>;
using YODA::Counter;
using YODA::Histo1D;
using YODA::HistoBin1D;
using YODA::Histo2D;
using YODA::HistoBin2D;
using YODA::Profile1D;
using YODA::ProfileBin1D;
using YODA::Profile2D;
using YODA::ProfileBin2D;
using YODA::Scatter1D;
using YODA::Point1D;
using YODA::Scatter2D;
using YODA::Point2D;
using YODA::Scatter3D;
using YODA::Point3D;
/// Function to get a map of all the refdata in a paper with the
/// given @a papername.
map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername);
/// @todo Also provide a Scatter3D getRefData() version?
/// Get the file system path to the reference file for this paper.
string getDatafilePath(const string& papername);
}
#endif
diff --git a/src/AnalysisTools/MC_JetAnalysis.cc b/src/AnalysisTools/MC_JetAnalysis.cc
--- a/src/AnalysisTools/MC_JetAnalysis.cc
+++ b/src/AnalysisTools/MC_JetAnalysis.cc
@@ -1,185 +1,185 @@
// -*- C++ -*-
#include "Rivet/Analyses/MC_JetAnalysis.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
MC_JetAnalysis::MC_JetAnalysis(const string& name,
size_t njet,
const string& jetpro_name,
double jetptcut)
: Analysis(name), _njet(njet), _jetpro_name(jetpro_name), _jetptcut(jetptcut),
_h_pT_jet(njet),
_h_eta_jet(njet), _h_eta_jet_plus(njet), _h_eta_jet_minus(njet),
_h_rap_jet(njet), _h_rap_jet_plus(njet), _h_rap_jet_minus(njet),
_h_mass_jet(njet), tmpeta(njet), tmprap(njet)
{
setNeedsCrossSection(true); // legitimate use, since a base class has no .info file!
}
// Book histograms
void MC_JetAnalysis::init() {
const double sqrts = sqrtS() ? sqrtS() : 14000.*GeV;
for (size_t i = 0; i < _njet; ++i) {
const string pTname = "jet_pT_" + to_str(i+1);
const double pTmax = 1.0/(double(i)+2.0) * sqrts/GeV/2.0;
const int nbins_pT = 100/(i+1);
if (pTmax > 10) { // Protection aginst logspace exception, needed for LEP
book(_h_pT_jet[i] ,pTname, logspace(nbins_pT, 10.0, pTmax));
}
const string massname = "jet_mass_" + to_str(i+1);
const double mmax = 100.0;
const int nbins_m = 100/(i+1);
book(_h_mass_jet[i] ,massname, logspace(nbins_m, 1.0, mmax));
const string etaname = "jet_eta_" + to_str(i+1);
book(_h_eta_jet[i] ,etaname, i > 1 ? 25 : 50, -5.0, 5.0);
book(_h_eta_jet_plus[i], "_" + etaname + "_plus", i > 1 ? 15 : 25, 0, 5);
book(_h_eta_jet_minus[i], "_" + etaname + "_minus", i > 1 ? 15 : 25, 0, 5);
const string rapname = "jet_y_" + to_str(i+1);
book(_h_rap_jet[i] ,rapname, i>1 ? 25 : 50, -5.0, 5.0);
book(_h_rap_jet_plus[i], "_" + rapname + "_plus", i > 1 ? 15 : 25, 0, 5);
book(_h_rap_jet_minus[i], "_" + rapname + "_minus", i > 1 ? 15 : 25, 0, 5);
book(tmpeta[i], "jet_eta_pmratio_" + to_str(i+1));
book(tmprap[i], "jet_y_pmratio_" + to_str(i+1));
for (size_t j = i+1; j < min(size_t(3), _njet); ++j) {
const std::pair<size_t, size_t> ij = std::make_pair(i, j);
const string ijstr = to_str(i+1) + to_str(j+1);
string detaname = "jets_deta_" + ijstr;
book(_h_deta_jets[ij], detaname, 25, -5.0, 5.0);
string dphiname = "jets_dphi_" + ijstr;
book(_h_dphi_jets[ij], dphiname, 25, 0.0, M_PI);
string dRname = "jets_dR_" + ijstr;
book(_h_dR_jets[ij], dRname, 25, 0.0, 5.0);
}
}
book(_h_jet_multi_exclusive ,"jet_multi_exclusive", _njet+3, -0.5, _njet+3-0.5);
book(_h_jet_multi_inclusive ,"jet_multi_inclusive", _njet+3, -0.5, _njet+3-0.5);
book(_h_jet_multi_ratio, "jet_multi_ratio");
book(_h_jet_HT ,"jet_HT", logspace(50, _jetptcut, sqrts/GeV/2.0));
book(_h_mjj_jets, "jets_mjj", 40, 0.0, sqrts/GeV/2.0);
}
// Do the analysis
void MC_JetAnalysis::analyze(const Event & e) {
const Jets& jets = apply<FastJets>(e, _jetpro_name).jetsByPt(_jetptcut);
for (size_t i = 0; i < _njet; ++i) {
if (jets.size() < i+1) continue;
_h_pT_jet[i]->fill(jets[i].pT()/GeV);
// Check for numerical precision issues with jet masses
double m2_i = jets[i].mass2();
if (m2_i < 0) {
if (m2_i < -1e-4) {
MSG_WARNING("Jet mass2 is negative: " << m2_i << " GeV^2. "
<< "Truncating to 0.0, assuming numerical precision is to blame.");
}
m2_i = 0.0;
}
// Jet mass
_h_mass_jet[i]->fill(sqrt(m2_i)/GeV);
// Jet eta
const double eta_i = jets[i].eta();
_h_eta_jet[i]->fill(eta_i);
(eta_i > 0.0 ? _h_eta_jet_plus : _h_eta_jet_minus)[i]->fill(fabs(eta_i));
// Jet rapidity
const double rap_i = jets[i].rapidity();
_h_rap_jet[i]->fill(rap_i);
(rap_i > 0.0 ? _h_rap_jet_plus : _h_rap_jet_minus)[i]->fill(fabs(rap_i));
// Inter-jet properties
for (size_t j = i+1; j < min(size_t(3),_njet); ++j) {
if (jets.size() < j+1) continue;
std::pair<size_t, size_t> ij = std::make_pair(i, j);
double deta = jets[i].eta()-jets[j].eta();
double dphi = deltaPhi(jets[i].momentum(),jets[j].momentum());
double dR = deltaR(jets[i].momentum(), jets[j].momentum());
_h_deta_jets[ij]->fill(deta);
_h_dphi_jets[ij]->fill(dphi);
_h_dR_jets[ij]->fill(dR);
}
}
// Multiplicities
_h_jet_multi_exclusive->fill(jets.size());
for (size_t i = 0; i < _njet+2; ++i) {
if (jets.size() >= i) {
_h_jet_multi_inclusive->fill(i);
}
}
// HT
double HT = 0.0;
foreach (const Jet& jet, jets) {
HT += jet.pT();
}
_h_jet_HT->fill(HT);
// mjj
if (jets.size() > 1) {
double mjj = (jets[0].momentum() + jets[1].momentum()).mass();
_h_mjj_jets->fill(mjj);
}
}
// Finalize
void MC_JetAnalysis::finalize() {
const double scaling = crossSection()/sumOfWeights();
for (size_t i = 0; i < _njet; ++i) {
scale(_h_pT_jet[i], scaling);
scale(_h_mass_jet[i], scaling);
scale(_h_eta_jet[i], scaling);
scale(_h_rap_jet[i], scaling);
// Create eta/rapidity ratio plots
- divide(*_h_eta_jet_plus[i], *_h_eta_jet_minus[i], tmpeta[i]);
- divide(*_h_rap_jet_plus[i], *_h_rap_jet_minus[i], tmprap[i]);
+ divide(_h_eta_jet_plus[i], _h_eta_jet_minus[i], tmpeta[i]);
+ divide(_h_rap_jet_plus[i], _h_rap_jet_minus[i], tmprap[i]);
}
// Scale the d{eta,phi,R} histograms
typedef map<pair<size_t, size_t>, Histo1DPtr> HistMap;
for (HistMap::value_type& it : _h_deta_jets) scale(it.second, scaling);
for (HistMap::value_type& it : _h_dphi_jets) scale(it.second, scaling);
for (HistMap::value_type& it : _h_dR_jets) scale(it.second, scaling);
// Fill inclusive jet multi ratio
int Nbins = _h_jet_multi_inclusive->numBins();
for (int i = 0; i < Nbins-1; ++i) {
_h_jet_multi_ratio->addPoint(i+1, 0, 0.5, 0);
if (_h_jet_multi_inclusive->bin(i).sumW() > 0.0) {
const double ratio = _h_jet_multi_inclusive->bin(i+1).sumW()/_h_jet_multi_inclusive->bin(i).sumW();
const double relerr_i = _h_jet_multi_inclusive->bin(i).relErr();
const double relerr_j = _h_jet_multi_inclusive->bin(i+1).relErr();
const double err = ratio * (relerr_i + relerr_j);
_h_jet_multi_ratio->point(i).setY(ratio, err);
}
}
scale(_h_jet_multi_exclusive, scaling);
scale(_h_jet_multi_inclusive, scaling);
scale(_h_jet_HT, scaling);
scale(_h_mjj_jets, scaling);
}
}
diff --git a/src/AnalysisTools/MC_ParticleAnalysis.cc b/src/AnalysisTools/MC_ParticleAnalysis.cc
--- a/src/AnalysisTools/MC_ParticleAnalysis.cc
+++ b/src/AnalysisTools/MC_ParticleAnalysis.cc
@@ -1,167 +1,168 @@
// -*- C++ -*-
#include "Rivet/Analyses/MC_ParticleAnalysis.hh"
namespace Rivet {
MC_ParticleAnalysis::MC_ParticleAnalysis(const string& name,
size_t nparticles,
const string& particle_name)
: Analysis(name),
_nparts(nparticles), _pname(particle_name),
_h_pt(nparticles),
_h_eta(nparticles), _h_eta_plus(nparticles), _h_eta_minus(nparticles),
- _h_rap(nparticles), _h_rap_plus(nparticles), _h_rap_minus(nparticles)
+ _h_rap(nparticles), _h_rap_plus(nparticles), _h_rap_minus(nparticles),
+ tmpeta(nparticles), tmprap(nparticles)
{
setNeedsCrossSection(true); // legitimate use, since a base class has no .info file!
}
// Book histograms
void MC_ParticleAnalysis::init() {
for (size_t i = 0; i < _nparts; ++i) {
+ book(tmpeta[i], _pname + "_eta_pmratio_" + to_str(i+1));
+ book(tmprap[i], _pname + "_y_pmratio_" + to_str(i+1));
+
const string ptname = _pname + "_pt_" + to_str(i+1);
const double ptmax = 1.0/(double(i)+2.0) * (sqrtS()>0.?sqrtS():14000.)/GeV/2.0;
const int nbins_pt = 100/(i+1);
book(_h_pt[i] ,ptname, logspace(nbins_pt, 1.0, ptmax));
const string etaname = _pname + "_eta_" + to_str(i+1);
book(_h_eta[i] ,etaname, i > 1 ? 25 : 50, -5.0, 5.0);
book(_h_eta_plus[i], "_" + etaname + "_plus", i > 1 ? 15 : 25, 0, 5);
book(_h_eta_minus[i], "_" + etaname + "_minus", i > 1 ? 15 : 25, 0, 5);
const string rapname = _pname + "_y_" + to_str(i+1);
book(_h_rap[i] ,rapname, i > 1 ? 25 : 50, -5.0, 5.0);
book(_h_rap_plus[i], "_" + rapname + "_plus", i > 1 ? 15 : 25, 0, 5);
book(_h_rap_minus[i], "_" + rapname + "_minus", i > 1 ? 15 : 25, 0, 5);
for (size_t j = i+1; j < min(size_t(3), _nparts); ++j) {
const pair<size_t, size_t> ij = std::make_pair(i, j);
string detaname = _pname + "s_deta_" + to_str(i+1) + to_str(j+1);
Histo1DPtr tmpeta;
book(tmpeta, detaname, 25, -5.0, 5.0);
_h_deta.insert(make_pair(ij, tmpeta));
string dphiname = _pname + "s_dphi_" + to_str(i+1) + to_str(j+1);
Histo1DPtr tmpphi;
book(tmpphi, dphiname, 25, 0.0, M_PI);
_h_dphi.insert(make_pair(ij, tmpphi));
string dRname = _pname + "s_dR_" + to_str(i+1) + to_str(j+1);
Histo1DPtr tmpR;
book(tmpR, dRname, 25, 0.0, 5.0);
_h_dR.insert(make_pair(ij, tmpR));
}
}
book(_h_multi_exclusive ,_pname + "_multi_exclusive", _nparts+3, -0.5, _nparts+3-0.5);
book(_h_multi_inclusive ,_pname + "_multi_inclusive", _nparts+3, -0.5, _nparts+3-0.5);
book(_h_multi_ratio, _pname + "_multi_ratio");
book(_h_multi_exclusive_prompt ,_pname + "_multi_exclusive_prompt", _nparts+3, -0.5, _nparts+3-0.5);
book(_h_multi_inclusive_prompt ,_pname + "_multi_inclusive_prompt", _nparts+3, -0.5, _nparts+3-0.5);
book(_h_multi_ratio_prompt, _pname + "_multi_ratio_prompt");
}
// Do the analysis
void MC_ParticleAnalysis::_analyze(const Event& event, const Particles& particles) {
Particles promptparticles;
foreach (const Particle& p, particles)
if (!p.fromDecay()) promptparticles += p;
for (size_t i = 0; i < _nparts; ++i) {
if (particles.size() < i+1) continue;
_h_pt[i]->fill(particles[i].pt()/GeV);
// Eta
const double eta_i = particles[i].eta();
_h_eta[i]->fill(eta_i);
(eta_i > 0.0 ? _h_eta_plus : _h_eta_minus)[i]->fill(fabs(eta_i));
// Rapidity
const double rap_i = particles[i].rapidity();
_h_rap[i]->fill(rap_i);
(rap_i > 0.0 ? _h_rap_plus : _h_rap_minus)[i]->fill(fabs(rap_i));
// Inter-particle properties
for (size_t j = i+1; j < min(size_t(3),_nparts); ++j) {
if (particles.size() < j+1) continue;
std::pair<size_t, size_t> ij = std::make_pair(i, j);
double deta = particles[i].eta() - particles[j].eta();
double dphi = deltaPhi(particles[i].momentum(), particles[j].momentum());
double dR = deltaR(particles[i].momentum(), particles[j].momentum());
_h_deta[ij]->fill(deta);
_h_dphi[ij]->fill(dphi);
_h_dR[ij]->fill(dR);
}
}
// Multiplicities
_h_multi_exclusive->fill(particles.size());
_h_multi_exclusive_prompt->fill(promptparticles.size());
for (size_t i = 0; i < _nparts+2; ++i) {
if (particles.size() >= i) _h_multi_inclusive->fill(i);
if (promptparticles.size() >= i) _h_multi_inclusive_prompt->fill(i);
}
}
// Finalize
void MC_ParticleAnalysis::finalize() {
const double scaling = crossSection()/sumOfWeights();
for (size_t i = 0; i < _nparts; ++i) {
scale(_h_pt[i], scaling);
scale(_h_eta[i], scaling);
scale(_h_rap[i], scaling);
// Create eta/rapidity ratio plots
- Scatter2DPtr tmpeta, tmprap;
- book(tmpeta, _pname + "_eta_pmratio_" + to_str(i+1));
- book(tmprap, _pname + "_y_pmratio_" + to_str(i+1));
- divide(*_h_eta_plus[i], *_h_eta_minus[i], tmpeta);
- divide(*_h_rap_plus[i], *_h_rap_minus[i], tmprap);
+ divide(_h_eta_plus[i], _h_eta_minus[i], tmpeta[i]);
+ divide(_h_rap_plus[i], _h_rap_minus[i], tmprap[i]);
}
// Scale the d{eta,phi,R} histograms
typedef map<pair<size_t, size_t>, Histo1DPtr> HistMap;
for (HistMap::value_type& it : _h_deta) scale(it.second, scaling);
for (HistMap::value_type& it : _h_dphi) scale(it.second, scaling);
for (HistMap::value_type& it : _h_dR) scale(it.second, scaling);
// Fill inclusive multi ratios
for (size_t i = 0; i < _h_multi_inclusive->numBins()-1; ++i) {
_h_multi_ratio->addPoint(i+1, 0, 0.5, 0);
if (_h_multi_inclusive->bin(i).sumW() > 0.0) {
const double ratio = _h_multi_inclusive->bin(i+1).sumW() / _h_multi_inclusive->bin(i).sumW();
const double relerr_i = _h_multi_inclusive->bin(i).relErr();
const double relerr_j = _h_multi_inclusive->bin(i+1).relErr();
const double err = ratio * (relerr_i + relerr_j);
_h_multi_ratio->point(i).setY(ratio, err);
}
}
for (size_t i = 0; i < _h_multi_inclusive_prompt->numBins()-1; ++i) {
_h_multi_ratio_prompt->addPoint(i+1, 0, 0.5, 0);
if (_h_multi_inclusive_prompt->bin(i).sumW() > 0.0) {
const double ratio = _h_multi_inclusive_prompt->bin(i+1).sumW() / _h_multi_inclusive_prompt->bin(i).sumW();
const double relerr_i = _h_multi_inclusive_prompt->bin(i).relErr();
const double relerr_j = _h_multi_inclusive_prompt->bin(i+1).relErr();
const double err = ratio * (relerr_i + relerr_j);
_h_multi_ratio_prompt->point(i).setY(ratio, err);
}
}
scale(_h_multi_exclusive, scaling);
scale(_h_multi_exclusive_prompt, scaling);
scale(_h_multi_inclusive, scaling);
scale(_h_multi_inclusive_prompt, scaling);
}
}
diff --git a/src/Core/Analysis.cc b/src/Core/Analysis.cc
--- a/src/Core/Analysis.cc
+++ b/src/Core/Analysis.cc
@@ -1,926 +1,862 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Tools/BeamConstraint.hh"
-#include "DummyConfig.hh"
-#ifdef HAVE_EXECINFO_H
-#include <execinfo.h>
-#endif
+// #include "DummyConfig.hh"
+// #ifdef HAVE_EXECINFO_H
+// #include <execinfo.h>
+// #endif
namespace Rivet {
/// @brief Construct an analysis by name and load its metadata.
/// The cross-section starts at the -1 sentinel / "not set" state until
/// setCrossSection() is called by the handler.
Analysis::Analysis(const string& name)
: _crossSection(-1.0),
_gotCrossSection(false),
_analysishandler(NULL)
{
// Projection registration is forbidden until the init phase; the handler
// re-enables it per-analysis (AnalysisHandler::init sets _allowProjReg = true).
ProjectionApplier::_allowProjReg = false;
_defaultname = name;
// Load the analysis' .info metadata; it must exist for a valid analysis.
unique_ptr<AnalysisInfo> ai = AnalysisInfo::make(name);
assert(ai);
_info = move(ai);
assert(_info);
}
double Analysis::sqrtS() const {
return handler().sqrtS();
}
const ParticlePair& Analysis::beams() const {
return handler().beams();
}
const PdgIdPair Analysis::beamIds() const {
return handler().beamIds();
}
/// @brief Output directory path for this analysis' histograms.
/// Returns "/<runName>/<name>" when a run name is set, otherwise "/<name>",
/// with any accidental "//" runs collapsed to "/".
const string Analysis::histoDir() const {
/// @todo Cache in a member variable
// NOTE(review): despite the leading underscore this is a *local* string,
// rebuilt on every call; the emptiness check below is therefore always true.
string _histoDir;
if (_histoDir.empty()) {
_histoDir = "/" + name();
if (handler().runName().length() > 0) {
_histoDir = "/" + handler().runName() + _histoDir;
}
replace_all(_histoDir, "//", "/"); //< iterates until none
}
return _histoDir;
}
const string Analysis::histoPath(const string& hname) const {
const string path = histoDir() + "/" + hname;
return path;
}
const string Analysis::histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return histoDir() + "/" + makeAxisCode(datasetId, xAxisId, yAxisId);
}
/// @brief Build the standard "dNN-xNN-yNN" histogram axis code.
/// Each ID is zero-padded to at least two digits; IDs >= 100 are printed
/// in full, matching the previous stream-based formatting.
const string Analysis::makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
// Zero-pad a single ID to a minimum width of two characters
auto pad2 = [](unsigned int id) {
const string s = to_string(id);
return (id < 10) ? "0" + s : s;
};
return "d" + pad2(datasetId) + "-x" + pad2(xAxisId) + "-y" + pad2(yAxisId);
}
Log& Analysis::getLog() const {
string logname = "Rivet.Analysis." + name();
return Log::getLog(logname);
}
size_t Analysis::numEvents() const {
return handler().numEvents();
}
double Analysis::sumOfWeights() const {
return handler().sumOfWeights();
}
///////////////////////////////////////////
bool Analysis::isCompatible(const ParticlePair& beams) const {
return isCompatible(beams.first.pid(), beams.second.pid(),
beams.first.energy(), beams.second.energy());
}
bool Analysis::isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const {
PdgIdPair beams(beam1, beam2);
pair<double,double> energies(e1, e2);
return isCompatible(beams, energies);
}
/// @brief Check whether the given beam IDs and energies match this analysis' requirements.
/// Beam IDs must match at least one required pair; energies must match at least
/// one required pair within 1% (relative) or 1 GeV (absolute), in either beam
/// ordering. An empty required-energies list means any energy is accepted.
bool Analysis::isCompatible(const PdgIdPair& beams, const pair<double,double>& energies) const {
// First check the beam IDs
bool beamIdsOk = false;
foreach (const PdgIdPair& bp, requiredBeams()) {
if (compatible(beams, bp)) {
beamIdsOk = true;
break;
}
}
if (!beamIdsOk) return false;
// Next check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness)
/// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles
// No required energies at all => vacuously compatible
bool beamEnergiesOk = requiredEnergies().size() > 0 ? false : true;
typedef pair<double,double> DoublePair;
foreach (const DoublePair& ep, requiredEnergies()) {
// Both (first,second) and the swapped (second,first) pairing are accepted,
// first by relative (1%) tolerance, then by absolute (1 GeV) tolerance.
if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) ||
(fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) ||
(abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) ||
(abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) {
beamEnergiesOk = true;
break;
}
}
return beamEnergiesOk;
/// @todo Need to also check internal consistency of the analysis'
/// beam requirements with those of the projections it uses.
}
///////////////////////////////////////////
Analysis& Analysis::setCrossSection(double xs) {
_crossSection = xs;
_gotCrossSection = true;
return *this;
}
/// @brief Get the process cross-section for this analysis.
/// @throws Error if the cross-section was never set (or was set to NaN),
/// rather than silently returning the -1 construction sentinel.
double Analysis::crossSection() const {
if (!_gotCrossSection || std::isnan(_crossSection)) {
string errMsg = "You did not set the cross section for the analysis " + name();
throw Error(errMsg);
}
return _crossSection;
}
double Analysis::crossSectionPerEvent() const {
return _crossSection/sumOfWeights();
}
////////////////////////////////////////////////////////////
// Histogramming
void Analysis::_cacheRefData() const {
if (_refdata.empty()) {
MSG_TRACE("Getting refdata cache for paper " << name());
_refdata = getRefData(name());
}
}
CounterPtr & Analysis::book(CounterPtr & ctr,
const string& cname,
const string& title) {
const string path = histoPath(cname);
ctr = CounterPtr(handler().numWeights(), Counter(path, title));
addAnalysisObject(ctr);
MSG_TRACE("Made counter " << cname << " for " << name());
return ctr;
}
CounterPtr & Analysis::book(CounterPtr & ctr, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title) {
// const string& xtitle,
// const string& ytitle) {
const string axisCode = makeAxisCode(datasetId, xAxisId, yAxisId);
return book(ctr, axisCode, title);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
size_t nbins, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Histo1D hist = Histo1D(nbins, lower, upper, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().numWeights(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(nbins, lower, upper, histoPath(hname), title);
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const initializer_list<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
return book(histo, hname, vector<double>{binedges}, title, xtitle, ytitle);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(binedges, histoPath(hname), title);
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
Histo1D hist = Histo1D(binedges, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().numWeights(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const Scatter2D& refscatter,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(refscatter, histoPath(hname));
// if (hist->hasAnnotation("IsRef")) hist->rmAnnotation("IsRef");
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
Histo1D hist = Histo1D(refscatter, path);
hist.setTitle(title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().numWeights(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const string& title,
const string& xtitle,
const string& ytitle) {
const Scatter2D& refdata = refData(hname);
return book(histo, hname, refdata, title, xtitle, ytitle);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = makeAxisCode(datasetId, xAxisId, yAxisId);
return book(histo, axisCode, title, xtitle, ytitle);
}
/// @todo Add booking methods which take a path, titles and *a reference Scatter from which to book*
/////////////////
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Histo2D hist(nxbins, xlower, xupper, nybins, ylower, yupper, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
hist.setAnnotation("ZLabel", ztitle);
h2d = Histo2DPtr(handler().numWeights(), hist);
addAnalysisObject(h2d);
MSG_TRACE("Made 2D histogram " << hname << " for " << name());
return h2d;
}
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
const initializer_list<double>& xbinedges,
const initializer_list<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
return book(h2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle);
}
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
const vector<double>& xbinedges,
const vector<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Histo2D hist(xbinedges, ybinedges, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
hist.setAnnotation("ZLabel", ztitle);
h2d = Histo2DPtr(handler().numWeights(), hist);
addAnalysisObject(h2d);
MSG_TRACE("Made 2D histogram " << hname << " for " << name());
return h2d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
size_t nbins, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(nbins, lower, upper, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().numWeights(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const initializer_list<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
return book(p1d, hname, vector<double>{binedges}, title, xtitle, ytitle);
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(binedges, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().numWeights(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const Scatter2D& refscatter,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(refscatter, path);
prof.setTitle(title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().numWeights(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
// if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef");
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const string& title,
const string& xtitle,
const string& ytitle) {
const Scatter2D& refdata = refData(hname);
book(p1d, hname, refdata, title, xtitle, ytitle);
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = makeAxisCode(datasetId, xAxisId, yAxisId);
return book(p1d, axisCode, title, xtitle, ytitle);
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Profile2D prof(nxbins, xlower, xupper, nybins, ylower, yupper, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
prof.setAnnotation("ZLabel", ztitle);
p2d = Profile2DPtr(handler().numWeights(), prof);
addAnalysisObject(p2d);
MSG_TRACE("Made 2D profile histogram " << hname << " for " << name());
return p2d;
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
const initializer_list<double>& xbinedges,
const initializer_list<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
return book(p2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle);
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
const vector<double>& xbinedges,
const vector<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Profile2D prof(xbinedges, ybinedges, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
prof.setAnnotation("ZLabel", ztitle);
p2d = Profile2DPtr(handler().numWeights(), prof);
addAnalysisObject(p2d);
MSG_TRACE("Made 2D profile histogram " << hname << " for " << name());
return p2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
bool copy_pts,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = makeAxisCode(datasetId, xAxisId, yAxisId);
return book(s2d, axisCode, copy_pts, title, xtitle, ytitle);
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
bool copy_pts,
const string& title,
const string& xtitle,
const string& ytitle) {
Scatter2D scat;
const string path = histoPath(hname);
if (copy_pts) {
const Scatter2D& refdata = refData(hname);
scat = Scatter2D(refdata, path);
for (Point2D& p : scat.points()) p.setY(0, 0);
} else {
scat = Scatter2D(path);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().numWeights(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
// if (scat.hasAnnotation("IsRef")) scat.rmAnnotation("IsRef");
return s2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
size_t npts, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Scatter2D scat;
const double binwidth = (upper-lower)/npts;
for (size_t pt = 0; pt < npts; ++pt) {
const double bincentre = lower + (pt + 0.5) * binwidth;
scat.addPoint(bincentre, 0, binwidth/2.0, 0);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().numWeights(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
return s2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Scatter2D scat;
for (size_t pt = 0; pt < binedges.size()-1; ++pt) {
const double bincentre = (binedges[pt] + binedges[pt+1]) / 2.0;
const double binwidth = binedges[pt+1] - binedges[pt];
scat.addPoint(bincentre, 0, binwidth/2.0, 0);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().numWeights(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
return s2d;
}
void Analysis::divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const {
const string path = s->path();
*s = *c1 / *c2;
s->setPath(path);
}
void Analysis::divide(const Counter& c1, const Counter& c2, Scatter1DPtr s) const {
const string path = s->path();
*s = c1 / c2;
s->setPath(path);
}
void Analysis::divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = *h1 / *h2;
s->setPath(path);
}
void Analysis::divide(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = h1 / h2;
s->setPath(path);
}
void Analysis::divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const {
const string path = s->path();
*s = *p1 / *p2;
s->setPath(path);
}
void Analysis::divide(const Profile1D& p1, const Profile1D& p2, Scatter2DPtr s) const {
const string path = s->path();
*s = p1 / p2;
s->setPath(path);
}
void Analysis::divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const {
const string path = s->path();
*s = *h1 / *h2;
s->setPath(path);
}
void Analysis::divide(const Histo2D& h1, const Histo2D& h2, Scatter3DPtr s) const {
const string path = s->path();
*s = h1 / h2;
s->setPath(path);
}
void Analysis::divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const {
const string path = s->path();
*s = *p1 / *p2;
s->setPath(path);
}
void Analysis::divide(const Profile2D& p1, const Profile2D& p2, Scatter3DPtr s) const {
const string path = s->path();
*s = p1 / p2;
s->setPath(path);
}
/// @todo Counter and Histo2D efficiencies and asymms
void Analysis::efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::efficiency(*h1, *h2);
s->setPath(path);
}
void Analysis::efficiency(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::efficiency(h1, h2);
s->setPath(path);
}
void Analysis::asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::asymm(*h1, *h2);
s->setPath(path);
}
void Analysis::asymm(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::asymm(h1, h2);
s->setPath(path);
}
void Analysis::scale(CounterPtr cnt, double factor) {
if (!cnt) {
MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << factor << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
factor = 0;
}
MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << factor);
try {
cnt->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale counter " << cnt->path());
return;
}
}
/// @brief Normalize a 1D histogram's integral to @a norm.
/// @param histo the histogram to normalise; a null pointer is reported and ignored.
/// @param norm the target integral value.
/// @param includeoverflows whether under/overflow bins count towards the integral.
/// YODA exceptions (e.g. zero-integral histograms) are caught and downgraded to warnings.
void Analysis::normalize(Histo1DPtr histo, double norm, bool includeoverflows) {
if (!histo) {
MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
return;
}
MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
try {
histo->normalize(norm, includeoverflows);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not normalize histo " << histo->path());
return;
}
}
/// @brief Multiply all weights of a 1D histogram by @a factor.
/// @param histo the histogram to scale; a null pointer is reported and ignored.
/// @param factor the weight scale factor; NaN/inf factors are replaced by 0
/// (emptying the histogram) after a warning, rather than poisoning the bins.
void Analysis::scale(Histo1DPtr histo, double factor) {
if (!histo) {
MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
factor = 0;
}
MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
try {
histo->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale histo " << histo->path());
return;
}
}
void Analysis::normalize(Histo2DPtr histo, double norm, bool includeoverflows) {
if (!histo) {
MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
return;
}
MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
try {
histo->normalize(norm, includeoverflows);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not normalize histo " << histo->path());
return;
}
}
void Analysis::scale(Histo2DPtr histo, double factor) {
if (!histo) {
MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
factor = 0;
}
MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
try {
histo->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale histo " << histo->path());
return;
}
}
void Analysis::integrate(Histo1DPtr h, Scatter2DPtr s) const {
// preserve the path info
const string path = s->path();
*s = toIntegralHisto(*h);
s->setPath(path);
}
void Analysis::integrate(const Histo1D& h, Scatter2DPtr s) const {
// preserve the path info
const string path = s->path();
*s = toIntegralHisto(h);
s->setPath(path);
}
}
/// @todo 2D versions of integrate... defined how, exactly?!?
//////////////////////////////////
namespace {
void errormsg(std::string name) {
-#ifdef HAVE_BACKTRACE
- void * buffer[4];
- backtrace(buffer, 4);
- backtrace_symbols_fd(buffer, 4 , 1);
-#endif
+// #ifdef HAVE_BACKTRACE
+// void * buffer[4];
+// backtrace(buffer, 4);
+// backtrace_symbols_fd(buffer, 4 , 1);
+// #endif
std::cerr << name << ": Can't book objects outside of init().\n";
assert(false);
}
}
namespace Rivet {
- // @todo
- // special handling for scatters
- void Analysis::addAnalysisObject(const shared_ptr<Scatter1DPtr>& ao) {
+
+ void Analysis::addAnalysisObject(const MultiweightAOPtr & ao) {
if (handler().stage() == AnalysisHandler::Stage::INIT) {
- _scatters.push_back(ao);
- ao.get()->blockDestructor(true);
- }
- else {
- errormsg(name());
- }
- }
- void Analysis::addAnalysisObject(const shared_ptr<Scatter2DPtr>& ao) {
- if (handler().stage() == AnalysisHandler::Stage::INIT) {
- _scatters.push_back(ao);
- ao.get()->blockDestructor(true);
- }
- else {
- errormsg(name());
- }
- }
- void Analysis::addAnalysisObject(const shared_ptr<Scatter3DPtr>& ao) {
- if (handler().stage() == AnalysisHandler::Stage::INIT) {
- _scatters.push_back(ao);
+ _analysisobjects.push_back(ao);
ao.get()->blockDestructor(true);
}
else {
errormsg(name());
}
}
- void Analysis::addAnalysisObject(MultiweightAOPtr & ao) {
- if (handler().stage() == AnalysisHandler::Stage::INIT) {
- _analysisobjects.push_back(ao);
- ao.blockDestructor(true);
- }
- else {
- errormsg(name());
- }
- }
-
void Analysis::removeAnalysisObject(const string& path) {
for (auto it = _analysisobjects.begin();
it != _analysisobjects.end(); ++it) {
- if ((*it).get()->path() == path) {
+ if ((*it)->path() == path) {
_analysisobjects.erase(it);
break;
}
}
- for (auto it = _scatters.begin(); it != _scatters.end(); ++it) {
- if ((**it)->path() == path) {
- _scatters.erase(it);
- break;
- }
- }
}
-
- /// @todo can we really remove (multiweighted) analysis objects by == operator??
- void Analysis::removeAnalysisObject(const Scatter1DPtr& ao) {
- for (auto it = _scatters.begin(); it != _scatters.end(); ++it) {
- if (**it == ao) {
- _scatters.erase(it);
- break;
- }
- }
- }
-
- void Analysis::removeAnalysisObject(const Scatter2DPtr& ao) {
- for (auto it = _scatters.begin(); it != _scatters.end(); ++it) {
- if (**it == ao) {
- _scatters.erase(it);
- break;
- }
- }
- }
-
- void Analysis::removeAnalysisObject(const Scatter3DPtr& ao) {
- for (auto it = _scatters.begin(); it != _scatters.end(); ++it) {
- if (**it == ao) {
- _scatters.erase(it);
- break;
- }
- }
- }
-
- void Analysis::removeAnalysisObject(const MultiweightAOPtr& ao) {
+ void Analysis::removeAnalysisObject(const MultiweightAOPtr & ao) {
for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) {
- if ((*it).get() == ao) {
+ if ((*it) == ao) {
_analysisobjects.erase(it);
break;
}
}
}
}
diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc
--- a/src/Core/AnalysisHandler.cc
+++ b/src/Core/AnalysisHandler.cc
@@ -1,524 +1,524 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/BeamConstraint.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/Beam.hh"
#include "YODA/ReaderYODA.h"
#include "YODA/WriterYODA.h"
#include <algorithm>
#include <cctype>
#include <regex>
namespace {
/// Split @a input on every match of @a regex and return the pieces.
/// Submatch index -1 selects the text *between* matches, i.e. splitting.
inline std::vector<std::string> split(const std::string& input, const std::string& regex) {
const std::regex pattern(regex);
std::sregex_token_iterator tok{input.begin(), input.end(), pattern, -1};
const std::sregex_token_iterator end;
std::vector<std::string> pieces;
for (; tok != end; ++tok) pieces.push_back(*tok);
return pieces;
}
}
namespace Rivet {
AnalysisHandler::AnalysisHandler(const string& runname)
: _runname(runname),
_eventCounter(0, Counter()), _xs(NAN),
_initialised(false), _ignoreBeams(false)
{}
AnalysisHandler::~AnalysisHandler()
{}
Log& AnalysisHandler::getLog() const {
return Log::getLog("Rivet.Analysis.Handler");
}
/// http://stackoverflow.com/questions/4654636/how-to-determine-if-a-string-is-a-number-with-c
/// @brief Return true iff @a s is non-empty and consists only of decimal digits.
/// @note The character is cast to unsigned char before std::isdigit is called:
/// passing a plain (possibly negative) char for non-ASCII bytes is undefined
/// behaviour, which the previous hand-rolled loop invoked.
bool is_number(const std::string& s)
{
return !s.empty() &&
std::all_of(s.begin(), s.end(),
[](unsigned char c) { return std::isdigit(c) != 0; });
}
/// Check if any of the weightnames is not a number
bool AnalysisHandler::haveNamedWeights() {
bool dec=false;
for (unsigned int i=0;i<_weightNames.size();++i) {
string s = _weightNames[i];
if (!is_number(s)) {
dec=true;
break;
}
}
return dec;
}
void AnalysisHandler::init(const GenEvent& ge) {
if (_initialised)
throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!");
setRunBeams(Rivet::beams(ge));
MSG_DEBUG("Initialising the analysis handler");
_eventNumber = ge.event_number();
setWeightNames(ge);
if (haveNamedWeights())
MSG_INFO("Using named weights");
else
MSG_INFO("NOT using named weights. Using first weight as nominal weight");
_numWeightTypes = _weightNames.size();
_eventCounter = CounterPtr(_numWeightTypes, Counter("_EVTCOUNT"));
// Check that analyses are beam-compatible, and remove those that aren't
const size_t num_anas_requested = analysisNames().size();
vector<string> anamestodelete;
for (const AnaHandle a : _analyses) {
if (!_ignoreBeams && !a->isCompatible(beams())) {
//MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV");
anamestodelete.push_back(a->name());
}
}
for (const string& aname : anamestodelete) {
MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing");
removeAnalysis(aname);
}
if (num_anas_requested > 0 && analysisNames().empty()) {
cerr << "All analyses were incompatible with the first event's beams\n"
<< "Exiting, since this probably wasn't intentional!" << endl;
exit(1);
}
// Warn if any analysis' status is not unblemished
for (const AnaHandle a : analyses()) {
if (toUpper(a->status()) == "PRELIMINARY") {
MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!");
} else if (toUpper(a->status()) == "OBSOLETE") {
MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!");
} else if (toUpper(a->status()).find("UNVALIDATED") != string::npos) {
MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!");
}
}
// Initialize the remaining analyses
_stage = Stage::INIT;
for (AnaHandle a : _analyses) {
MSG_DEBUG("Initialising analysis: " << a->name());
try {
// Allow projection registration in the init phase onwards
a->_allowProjReg = true;
a->init();
//MSG_DEBUG("Checking consistency of analysis: " << a->name());
//a->checkConsistency();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
exit(1);
}
MSG_DEBUG("Done initialising analysis: " << a->name());
}
_stage = Stage::OTHER;
_initialised = true;
MSG_DEBUG("Analysis handler initialised");
}
void AnalysisHandler::setWeightNames(const GenEvent& ge) {
/// reroute the print output to a stringstream and process
/// The iteration is done over a map in hepmc2 so this is safe
ostringstream stream;
ge.weights().print(stream); // Super lame, I know
string str = stream.str();
std::regex re("(([^()]+))"); // Regex for stuff enclosed by parentheses ()
for(std::sregex_iterator i = std::sregex_iterator(str.begin(), str.end(), re);
i != std::sregex_iterator(); ++i ) {
std::smatch m = *i;
vector<string> temp = ::split(m.str(), "[,]");
if (temp.size() ==2) {
MSG_DEBUG("Name of weight #" << _weightNames.size() << ": " << temp[0]);
_weightNames.push_back(temp[0]);
}
}
}
void AnalysisHandler::analyze(const GenEvent& ge) {
// Call init with event as template if not already initialised
if (!_initialised) init(ge);
assert(_initialised);
// Ensure that beam details match those from the first event (if we're checking beams)
if ( !_ignoreBeams ) {
const PdgIdPair beams = Rivet::beamIds(ge);
const double sqrts = Rivet::sqrtS(ge);
if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) {
cerr << "Event beams mismatch: "
<< PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV" << " vs. first beams "
<< this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl;
exit(1);
}
}
// Create the Rivet event wrapper
/// @todo Filter/normalize the event here
Event event(ge);
// won't happen for first event because _eventNumber is set in
// init()
if (_eventNumber != ge.event_number()) {
/// @todo
/// can we get away with not passing a matrix?
MSG_TRACE("AnalysisHandler::analyze(): Pushing _eventCounter to persistent.");
- _eventCounter.pushToPersistent(_subEventWeights);
+ _eventCounter.get()->pushToPersistent(_subEventWeights);
// if this is indeed a new event, push the temporary
// histograms and reset
for (const AnaHandle& a : _analyses) {
- for (MultiweightAOPtr & ao : a->analysisObjects()) {
+ for (auto ao : a->analysisObjects()) {
MSG_TRACE("AnalysisHandler::analyze(): Pushing " << a->name() << "'s " << ao->name() << " to persistent.");
- ao.pushToPersistent(_subEventWeights);
+ ao.get()->pushToPersistent(_subEventWeights);
}
MSG_TRACE("AnalysisHandler::analyze(): finished pushing " << a->name() << "'s objects to persistent.");
}
_eventNumber = ge.event_number();
- MSG_DEBUG("nominal event # " << _eventCounter._persistent[0]->numEntries());
- MSG_DEBUG("nominal sum of weights: " << _eventCounter._persistent[0]->sumW());
+ MSG_DEBUG("nominal event # " << _eventCounter.get()->_persistent[0]->numEntries());
+ MSG_DEBUG("nominal sum of weights: " << _eventCounter.get()->_persistent[0]->sumW());
MSG_DEBUG("Event has " << _subEventWeights.size() << " sub events.");
_subEventWeights.clear();
}
- _eventCounter.newSubEvent();
+ _eventCounter.get()->newSubEvent();
for (const AnaHandle& a : _analyses) {
- for (MultiweightAOPtr & ao : a->analysisObjects()) {
- ao.newSubEvent();
+ for (auto ao : a->analysisObjects()) {
+ ao.get()->newSubEvent();
}
}
_subEventWeights.push_back(event.weights());
MSG_DEBUG("Analyzing subevent #" << _subEventWeights.size() - 1 << ".");
// Cross-section
#ifdef HEPMC_HAS_CROSS_SECTION
if (ge.cross_section()) {
_xs = ge.cross_section()->cross_section();
_xserr = ge.cross_section()->cross_section_error();
}
#endif
_eventCounter->fill();
// Run the analyses
for (AnaHandle a : _analyses) {
MSG_TRACE("About to run analysis " << a->name());
try {
a->analyze(event);
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl;
exit(1);
}
MSG_TRACE("Finished running analysis " << a->name());
}
}
void AnalysisHandler::analyze(const GenEvent* ge) {
if (ge == NULL) {
MSG_ERROR("AnalysisHandler received null pointer to GenEvent");
//throw Error("AnalysisHandler received null pointer to GenEvent");
}
analyze(*ge);
}
void AnalysisHandler::finalize() {
if (!_initialised) return;
MSG_INFO("Finalising analyses");
MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent.");
- _eventCounter.pushToPersistent(_subEventWeights);
+ _eventCounter.get()->pushToPersistent(_subEventWeights);
for (const AnaHandle& a : _analyses) {
- for (MultiweightAOPtr & ao : a->analysisObjects())
- ao.pushToPersistent(_subEventWeights);
+ for (auto ao : a->analysisObjects())
+ ao.get()->pushToPersistent(_subEventWeights);
}
for (const AnaHandle& a : _analyses) {
a->setCrossSection(_xs);
for (size_t iW = 0; iW < numWeights(); iW++) {
- _eventCounter.setActiveWeightIdx(iW);
- for (MultiweightAOPtr & ao : a->analysisObjects())
- ao.setActiveWeightIdx(iW);
+ _eventCounter.get()->setActiveWeightIdx(iW);
+ for (auto ao : a->analysisObjects())
+ ao.get()->setActiveWeightIdx(iW);
MSG_TRACE("running " << a->name() << "::finalize() for weight " << iW << ".");
try {
a->finalize();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl;
exit(1);
}
}
// allow AO destruction again
- for (MultiweightAOPtr & ao : a->analysisObjects())
- ao.blockDestructor(false);
+ for (auto ao : a->analysisObjects())
+ ao.get()->blockDestructor(false);
}
// Print out number of events processed
MSG_INFO("Processed " << numEvents() << " event" << (numEvents() == 1 ? "" : "s"));
// // Delete analyses
// MSG_DEBUG("Deleting analyses");
// _analyses.clear();
// Print out MCnet boilerplate
cout << endl;
cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl;
cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl;
}
AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) {
// Check for a duplicate analysis
/// @todo Might we want to be able to run an analysis twice, with different params?
/// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects.
for (const AnaHandle& a : _analyses) {
if (a->name() == analysisname) {
MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate");
return *this;
}
}
AnaHandle analysis( AnalysisLoader::getAnalysis(analysisname) );
if (analysis.get() != 0) { // < Check for null analysis.
MSG_DEBUG("Adding analysis '" << analysisname << "'");
analysis->_analysishandler = this;
_analyses.insert(analysis);
} else {
MSG_WARNING("Analysis '" << analysisname << "' not found.");
}
// MSG_WARNING(_analyses.size());
// for (const AnaHandle& a : _analyses) MSG_WARNING(a->name());
return *this;
}
AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) {
std::shared_ptr<Analysis> toremove;
for (const AnaHandle a : _analyses) {
if (a->name() == analysisname) {
toremove = a;
break;
}
}
if (toremove.get() != 0) {
MSG_DEBUG("Removing analysis '" << analysisname << "'");
_analyses.erase(toremove);
}
return *this;
}
void AnalysisHandler::addData(const std::vector<YODA::AnalysisObjectPtr>& aos) {
for (const YODA::AnalysisObjectPtr ao : aos) {
const string path = ao->path();
if (path.size() > 1) { // path > "/"
try {
const string ananame = ::split(path, "/")[0];
AnaHandle a = analysis(ananame);
//MultiweightAOPtr mao = ????; /// @todo generate right Multiweight object from ao
//a->addAnalysisObject(mao); /// @todo Need to statistically merge...
} catch (const Error& e) {
MSG_WARNING(e.what());
}
}
}
}
void AnalysisHandler::readData(const string& filename) {
vector<YODA::AnalysisObjectPtr> aos;
try {
/// @todo Use new YODA SFINAE to fill the smart ptr vector directly
vector<YODA::AnalysisObject*> aos_raw;
YODA::ReaderYODA::read(filename, aos_raw);
for (YODA::AnalysisObject* aor : aos_raw) aos.push_back(YODA::AnalysisObjectPtr(aor));
} catch (const YODA::ReadError & e) {
throw UserError("Unexpected error in reading file: " + filename);
}
if (!aos.empty()) addData(aos);
}
- vector<reference_wrapper<MultiweightAOPtr> > AnalysisHandler::getRivetAOs() const {
- vector<reference_wrapper<MultiweightAOPtr> > rtn;
+ vector<MultiweightAOPtr> AnalysisHandler::getRivetAOs() const {
+ vector<MultiweightAOPtr> rtn;
for (AnaHandle a : _analyses) {
for (const auto & ao : a->analysisObjects()) {
rtn.push_back(ao);
}
}
// Should event counter be included here?
rtn.push_back(_eventCounter);
return rtn;
}
vector<YODA::AnalysisObjectPtr> AnalysisHandler::getYodaAOs() const {
vector<YODA::AnalysisObjectPtr> rtn;
- for (MultiweightAOPtr & rao : getRivetAOs()) {
+ for (auto rao : getRivetAOs()) {
// need to set the index
// before we can search the PATH
- rao.setActiveWeightIdx(0);
+ rao.get()->setActiveWeightIdx(0);
if (rao->path().find("/TMP/") != string::npos)
continue;
for (size_t iW = 0; iW < numWeights(); iW++) {
- rao.setActiveWeightIdx(iW);
+ rao.get()->setActiveWeightIdx(iW);
// add the weight name in brackets unless we recognize a
// nominal weight
if (_weightNames[iW] != "Weight"
&& _weightNames[iW] != "0"
&& _weightNames[iW] != "Default")
rao->setPath(rao->path() + "[" + _weightNames[iW] + "]");
- rtn.push_back(rao.activeYODAPtr());
+ rtn.push_back(rao.get()->activeYODAPtr());
}
}
YODA::Scatter1D::Points pts; pts.insert(YODA::Point1D(_xs, _xserr));
rtn.push_back( make_shared<Scatter1D>(pts, "/_XSEC") );
sort(rtn.begin(), rtn.end(),
[](YODA::AnalysisObjectPtr a, YODA::AnalysisObjectPtr b) {
return a->path() < b->path();
}
);
return rtn;
}
vector<YODA::AnalysisObjectPtr> AnalysisHandler::getData() const {
return getYodaAOs();
}
void AnalysisHandler::writeData(const string& filename) const {
const vector<YODA::AnalysisObjectPtr> aos = getData();
try {
YODA::WriterYODA::write(filename, aos.begin(), aos.end());
} catch ( YODA::WriteError ) {
throw UserError("Unexpected error in writing file: " + filename);
}
}
string AnalysisHandler::runName() const { return _runname; }
size_t AnalysisHandler::numEvents() const { return _eventCounter->numEntries(); }
/*
* why is this here?
void AnalysisHandler::setSumOfWeights(const double& sum) {
sumOfWeights() = sum;
}
*/
std::vector<std::string> AnalysisHandler::analysisNames() const {
std::vector<std::string> rtn;
for (AnaHandle a : _analyses) {
rtn.push_back(a->name());
}
return rtn;
}
const AnaHandle AnalysisHandler::analysis(const std::string& analysisname) const {
for (const AnaHandle a : analyses())
if (a->name() == analysisname) return a;
throw Error("No analysis named '" + analysisname + "' registered in AnalysisHandler");
}
AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) {
for (const string& aname : analysisnames) {
//MSG_DEBUG("Adding analysis '" << aname << "'");
addAnalysis(aname);
}
return *this;
}
AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) {
for (const string& aname : analysisnames) {
removeAnalysis(aname);
}
return *this;
}
bool AnalysisHandler::needCrossSection() const {
bool rtn = false;
for (const AnaHandle a : _analyses) {
if (!rtn) rtn = a->needsCrossSection();
if (rtn) break;
}
return rtn;
}
AnalysisHandler& AnalysisHandler::setCrossSection(double xs) {
_xs = xs;
return *this;
}
bool AnalysisHandler::hasCrossSection() const {
return (!std::isnan(crossSection()));
}
AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) {
analysis->_analysishandler = this;
_analyses.insert(AnaHandle(analysis));
return *this;
}
PdgIdPair AnalysisHandler::beamIds() const {
return Rivet::beamIds(beams());
}
double AnalysisHandler::sqrtS() const {
return Rivet::sqrtS(beams());
}
void AnalysisHandler::setIgnoreBeams(bool ignore) {
_ignoreBeams=ignore;
}
}
diff --git a/src/Projections/Sphericity.cc b/src/Projections/Sphericity.cc
--- a/src/Projections/Sphericity.cc
+++ b/src/Projections/Sphericity.cc
@@ -1,164 +1,178 @@
// -*- C++ -*-
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Jet.hh"
namespace Rivet {
Sphericity::Sphericity(const FinalState& fsp, double rparam)
: _regparam(rparam)
{
setName("Sphericity");
addProjection(fsp, "FS");
clear();
}
void Sphericity::clear() {
_lambdas = vector<double>(3, 0);
_sphAxes = vector<Vector3>(3, Vector3());
}
int Sphericity::compare(const Projection& p) const {
PCmp fscmp = mkNamedPCmp(p, "FS");
if (fscmp != EQUIVALENT) return fscmp;
const Sphericity& other = dynamic_cast<const Sphericity&>(p);
if (fuzzyEquals(_regparam, other._regparam)) return 0;
return cmp(_regparam, other._regparam);
}
void Sphericity::project(const Event& e) {
const Particles prts = applyProjection<FinalState>(e, "FS").particles();
calc(prts);
}
void Sphericity::calc(const FinalState& fs) {
calc(fs.particles());
}
void Sphericity::calc(const Particles& particles) {
vector<Vector3> threeMomenta;
transform(particles, threeMomenta, p3);
calc(threeMomenta);
}
void Sphericity::calc(const Jets& jets) {
vector<Vector3> threeMomenta;
transform(jets, threeMomenta, p3);
calc(threeMomenta);
}
void Sphericity::calc(const vector<FourMomentum>& momenta) {
vector<Vector3> threeMomenta;
transform(momenta, threeMomenta, [](const FourMomentum& p4){return p4.vector3();});
calc(threeMomenta);
}
Vector3 Sphericity::mkEigenVector(Matrix3 A, const double &lambda) {
const double b = A.get(0,1);
const double c = A.get(0,2);
const double d = A.get(1,1);
const double e = A.get(1,2);
const double f = A.get(2,2);
- double x = e*(b*f -c*e - b*lambda)/(b*e -c*d + c*lambda)/c + (lambda -f)/c;
- double y = (c*e -b*f +b*lambda)/(b*e -c*d + c*lambda);
+ const double denom = b*e -c*d + c*lambda;
+
+ double x = e*(b*f -c*e - b*lambda)/denom/c + (lambda -f)/c;
+ double y = (c*e -b*f +b*lambda)/denom;
Vector3 E(x,y,1);
return E.unit();
}
void Sphericity::calc(const vector<Vector3>& momenta) {
MSG_DEBUG("Calculating sphericity with r = " << _regparam);
// Return (with "safe nonsense" sphericity params) if there are no final state particles
if (momenta.empty()) {
- MSG_DEBUG("No momenta given...");
+ MSG_DEBUG("Not enough momenta given...");
clear();
return;
}
// Iterate over all the final state particles.
Matrix3 mMom;
double totalMomentum = 0.0;
MSG_DEBUG("Number of particles = " << momenta.size());
for (const Vector3& p3 : momenta) {
// Build the (regulated) normalising factor.
totalMomentum += pow(p3.mod(), _regparam);
// Build (regulated) quadratic momentum components.
const double regfactor = pow(p3.mod(), _regparam-2);
if (!fuzzyEquals(regfactor, 1.0)) {
MSG_TRACE("Regfactor (r=" << _regparam << ") = " << regfactor);
}
Matrix3 mMomPart;
for (size_t i = 0; i < 3; ++i) {
for (size_t j = 0; j < 3; ++j) {
mMomPart.set(i,j, p3[i]*p3[j]);
}
}
mMom += regfactor * mMomPart;
}
+ if (mMom.get(2,0) == 0 && mMom.get(2,1) == 0 && mMom.get(2,2) == 0) {
+ MSG_DEBUG("No longitudinal momenta given...");
+ clear();
+ return;
+ }
+
// Normalise to total (regulated) momentum.
mMom /= totalMomentum;
MSG_DEBUG("Momentum tensor = " << "\n" << mMom);
// Check that the matrix is symmetric.
const bool isSymm = mMom.isSymm();
if (!isSymm) {
MSG_ERROR("Error: momentum tensor not symmetric (r=" << _regparam << ")");
MSG_ERROR("[0,1] vs. [1,0]: " << mMom.get(0,1) << ", " << mMom.get(1,0));
MSG_ERROR("[0,2] vs. [2,0]: " << mMom.get(0,2) << ", " << mMom.get(2,0));
MSG_ERROR("[1,2] vs. [2,1]: " << mMom.get(1,2) << ", " << mMom.get(2,1));
}
// If not symmetric, something's wrong (we made sure the error msg appeared first).
assert(isSymm);
// Eigenvalues
const double q = mMom.trace()/3.;
const double p1 = mMom.get(0,1)*mMom.get(0,1) + mMom.get(0,2)*mMom.get(0,2) + mMom.get(1,2)*mMom.get(1,2);
const double p2 = (mMom.get(0,0) - q)*(mMom.get(0,0) - q)
+ (mMom.get(1,1) - q)*(mMom.get(1,1) - q) + (mMom.get(2,2) - q)*(mMom.get(2,2) - q) + 2.*p1;
const double p = sqrt(p2/6.);
Matrix3 I3 = Matrix3::mkIdentity();
const double r = ( 1./p * (mMom - q*I3)).det()/2.;
double phi(0);
if (r <= -1) phi = M_PI / 3.;
else if (r >= 1) phi = 0;
else phi = acos(r) / 3.;
const double l1 = q + 2 * p * cos(phi);
const double l3 = q + 2 * p * cos(phi + (2*M_PI/3.));
const double l2 = 3 * q - l1 - l3;
+ if (l1 == 0 || l2 == 0 || l3 == 0) {
+ MSG_DEBUG("Zero eigenvalue...");
+ clear();
+ return;
+ }
+
_lambdas.clear();
_sphAxes.clear();
_sphAxes.push_back(mkEigenVector(mMom, l1));
_sphAxes.push_back(mkEigenVector(mMom, l2));
_sphAxes.push_back(mkEigenVector(mMom, l3));
_lambdas.push_back(l1);
_lambdas.push_back(l2);
_lambdas.push_back(l3);
// Debug output.
MSG_DEBUG("Lambdas = ("
<< lambda1() << ", " << lambda2() << ", " << lambda3() << ")");
MSG_DEBUG("Sum of lambdas = " << lambda1() + lambda2() + lambda3());
MSG_DEBUG("Vectors = "
<< sphericityAxis() << ", "
<< sphericityMajorAxis() << ", "
<< sphericityMinorAxis() << ")");
}
}
diff --git a/src/Tools/RivetYODA.cc b/src/Tools/RivetYODA.cc
--- a/src/Tools/RivetYODA.cc
+++ b/src/Tools/RivetYODA.cc
@@ -1,392 +1,392 @@
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Tools/RivetYODA.hh"
#include "Rivet/Tools/RivetPaths.hh"
#include "YODA/ReaderYODA.h"
#include "YODA/ReaderAIDA.h"
-#include "DummyConfig.hh"
-#ifdef HAVE_EXECINFO_H
-#include <execinfo.h>
-#endif
+// #include "DummyConfig.hh"
+// #ifdef HAVE_EXECINFO_H
+// #include <execinfo.h>
+// #endif
using namespace std;
namespace Rivet {
template <class T>
-Wrapper<T>::~Wrapper() {
- // checking the use_count, because we only want to prevent destruction of the last one,
- // which seems to have a count of 2 for some reason
- if ( _blockDestructor // we're a registered AO
- && !_persistent.empty() // with some entries
- && _persistent[0] // that are non-null
- && _persistent[0].use_count() <= 2 ) { // and have no refs left => can't destruct
-#ifdef HAVE_BACKTRACE
- void * buffer[4];
- backtrace(buffer, 4);
- backtrace_symbols_fd(buffer, 4 , 1);
-#endif
- cerr << "***\n"
- << "* Cannot destruct temporary AO before finalize.\n"
- << "* All booked AOs must be class members.\n"
- << "***\n";
- assert(false);
- }
-}
+Wrapper<T>::~Wrapper() {}
+// // checking the use_count, because we only want to prevent destruction of the last one,
+// // which seems to have a count of 2 for some reason
+// if ( _blockDestructor // we're a registered AO
+// && !_persistent.empty() // with some entries
+// && _persistent[0] // that are non-null
+// && _persistent[0].use_count() <= 2 ) { // and have no refs left => can't destruct
+// #ifdef HAVE_BACKTRACE
+// void * buffer[4];
+// backtrace(buffer, 4);
+// backtrace_symbols_fd(buffer, 4 , 1);
+// #endif
+// cerr << "***\n"
+// << "* Cannot destruct temporary AO before finalize.\n"
+// << "* All booked AOs must be class members.\n"
+// << "***\n";
+// assert(false);
+// }
+// }
template <class T>
Wrapper<T>::Wrapper(size_t len_of_weightvec, const T & p)
{
for (size_t m = 0; m < len_of_weightvec; ++m)
_persistent.push_back(make_shared<T>(p));
}
template <class T>
typename T::Ptr Wrapper<T>::active() const {
if ( !_active ) {
assert(false && "No active pointer set. Was this object booked in init()?");
}
return _active;
}
template <class T>
void Wrapper<T>::newSubEvent() {
typename TupleWrapper<T>::Ptr tmp
= make_shared<TupleWrapper<T>>(_persistent[0]->clone());
tmp->reset();
_evgroup.push_back( tmp );
_active = _evgroup.back();
assert(_active);
}
string getDatafilePath(const string& papername) {
/// Try to find YODA otherwise fall back to try AIDA
const string path1 = findAnalysisRefFile(papername + ".yoda");
if (!path1.empty()) return path1;
const string path2 = findAnalysisRefFile(papername + ".aida");
if (!path2.empty()) return path2;
throw Rivet::Error("Couldn't find ref data file '" + papername + ".yoda" +
" in data path, '" + getRivetDataPath() + "', or '.'");
}
map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername) {
const string datafile = getDatafilePath(papername);
// Make an appropriate data file reader and read the data objects
/// @todo Remove AIDA support some day...
YODA::Reader& reader = (datafile.find(".yoda") != string::npos) ? \
YODA::ReaderYODA::create() : YODA::ReaderAIDA::create();
vector<YODA::AnalysisObject *> aovec;
reader.read(datafile, aovec);
// Return value, to be populated
map<string, YODA::AnalysisObjectPtr> rtn;
foreach ( YODA::AnalysisObject* ao, aovec ) {
YODA::AnalysisObjectPtr refdata(ao);
if (!refdata) continue;
const string plotpath = refdata->path();
// Split path at "/" and only return the last field, i.e. the histogram ID
const size_t slashpos = plotpath.rfind("/");
const string plotname = (slashpos+1 < plotpath.size()) ? plotpath.substr(slashpos+1) : "";
rtn[plotname] = refdata;
}
return rtn;
}
}
namespace {
using Rivet::Fill;
using Rivet::Fills;
using Rivet::TupleWrapper;
template <class T>
double get_window_size(const typename T::Ptr & histo,
typename T::BinType x) {
// the bin index we fall in
const auto binidx = histo->binIndexAt(x);
// gaps, overflow, underflow don't contribute
if ( binidx == -1 )
return 0;
const auto & b = histo->bin(binidx);
// if we don't have a valid neighbouring bin,
// we use infinite width
typename T::Bin b1(-1.0/0.0, 1.0/0.0);
// points in the top half compare to the upper neighbour
if ( x > b.xMid() ) {
size_t nextidx = binidx + 1;
if ( nextidx < histo->bins().size() )
b1 = histo->bin(nextidx);
}
else { // compare to the lower neighbour
int nextidx = binidx - 1;
if ( nextidx >= 0 )
b1 = histo->bin(nextidx);
}
// the factor 2 is arbitrary, could poss. be smaller
return min( b.width(), b1.width() ) / 2.0;
}
template <class T>
typename T::BinType
fillT2binT(typename T::FillType a) {
return a;
}
template <>
YODA::Profile1D::BinType
fillT2binT<YODA::Profile1D>(YODA::Profile1D::FillType a) {
return get<0>(a);
}
template <>
YODA::Profile2D::BinType
fillT2binT<YODA::Profile2D>(YODA::Profile2D::FillType a) {
return YODA::Profile2D::BinType{ get<0>(a), get<1>(a) };
}
template <class T>
void commit(vector<typename T::Ptr> & persistent,
const vector< vector<Fill<T>> > & tuple,
const vector<valarray<double>> & weights ) {
// TODO check if all the xs are in the same bin anyway!
// Then no windowing needed
assert(persistent.size() == weights[0].size());
for ( const auto & x : tuple ) {
double maxwindow = 0.0;
for ( const auto & xi : x ) {
// TODO check for NOFILL here
// persistent[0] has the same binning as all the other persistent objects
double window = get_window_size<T>(persistent[0], fillT2binT<T>(xi.first));
if ( window > maxwindow )
maxwindow = window;
}
const double wsize = maxwindow;
// all windows have same size
set<double> edgeset;
// bin edges need to be in here!
for ( const auto & xi : x ) {
edgeset.insert(fillT2binT<T>(xi.first) - wsize);
edgeset.insert(fillT2binT<T>(xi.first) + wsize);
}
vector< std::tuple<double,valarray<double>,double> > hfill;
double sumf = 0.0;
auto edgit = edgeset.begin();
double ehi = *edgit;
while ( ++edgit != edgeset.end() ) {
double elo = ehi;
ehi = *edgit;
valarray<double> sumw(0.0, persistent.size()); // need m copies of this
bool gap = true; // Check for gaps between the sub-windows.
for ( size_t i = 0; i < x.size(); ++i ) {
// check equals comparisons here!
if ( fillT2binT<T>(x[i].first) + wsize >= ehi
&&
fillT2binT<T>(x[i].first) - wsize <= elo ) {
sumw += x[i].second * weights[i];
gap = false;
}
}
if ( gap ) continue;
hfill.push_back( make_tuple( (ehi + elo)/2.0, sumw, (ehi - elo) ) );
sumf += ehi - elo;
}
for ( auto f : hfill )
for ( size_t m = 0; m < persistent.size(); ++m )
persistent[m]->fill( get<0>(f), get<1>(f)[m], get<2>(f)/sumf );
// Note the scaling to one single fill
}
}
template<>
void commit<YODA::Histo2D>(vector<YODA::Histo2D::Ptr> & persistent,
const vector< vector<Fill<YODA::Histo2D>> > & tuple,
const vector<valarray<double>> & weights)
{}
template<>
void commit<YODA::Profile2D>(vector<YODA::Profile2D::Ptr> & persistent,
const vector< vector<Fill<YODA::Profile2D>> > & tuple,
const vector<valarray<double>> & weights)
{}
template <class T>
double distance(T a, T b) {
return abs(a - b);
}
template <>
double distance<tuple<double,double> >(tuple<double,double> a, tuple<double,double> b) {
return Rivet::sqr(get<0>(a) - get<0>(b)) + Rivet::sqr(get<1>(a) - get<1>(b));
}
}
/// fills is a vector of sub-event with an ordered set of x-values of
/// the fills in each sub-event. NOFILL should be an "impossible"
/// value for this histogram. Returns a vector of sub-events with
/// an ordered vector of fills (including NOFILLs) for each sub-event.
template <class T>
vector< vector<Fill<T> > >
match_fills(const vector<typename TupleWrapper<T>::Ptr> & evgroup, const Fill<T> & NOFILL)
{
vector< vector<Fill<T> > > matched;
// First just copy subevents into vectors and find the longest vector.
unsigned int maxfill = 0; // length of biggest vector
int imax = 0; // index position of biggest vector
for ( const auto & it : evgroup ) {
const auto & subev = it->fills();
if ( subev.size() > maxfill ) {
maxfill = subev.size();
imax = matched.size();
}
matched.push_back(vector<Fill<T> >(subev.begin(), subev.end()));
}
// Now, go through all subevents with missing fills.
const vector<Fill<T>> & full = matched[imax]; // the longest one
for ( auto & subev : matched ) {
if ( subev.size() == maxfill ) continue;
// Add NOFILLs to the end;
while ( subev.size() < maxfill ) subev.push_back(NOFILL);
// Iterate from the back and shift all fill values backwards by
// swapping with NOFILLs so that they better match the full
// subevent.
for ( int i = maxfill - 1; i >= 0; --i ) {
if ( subev[i] == NOFILL ) continue;
size_t j = i;
while ( j + 1 < maxfill && subev[j + 1] == NOFILL &&
distance(fillT2binT<T>(subev[j].first),
fillT2binT<T>(full[j].first))
>
distance(fillT2binT<T>(subev[j].first),
fillT2binT<T>(full[j + 1].first)) )
{
swap(subev[j], subev[j + 1]);
++j;
}
}
}
// transpose
vector<vector<Fill<T>>> result(maxfill,vector<Fill<T>>(matched.size()));
for (size_t i = 0; i < matched.size(); ++i)
for (size_t j = 0; j < maxfill; ++j)
result.at(j).at(i) = matched.at(i).at(j);
return result;
}
namespace Rivet {
template <class T>
void Wrapper<T>::pushToPersistent(const vector<valarray<double> >& weight) {
assert( _evgroup.size() == weight.size() );
// have we had subevents at all?
const bool have_subevents = _evgroup.size() > 1;
if ( ! have_subevents ) {
// simple replay of all tuple entries
// each recorded fill is inserted into all persistent weightname histos
for ( size_t m = 0; m < _persistent.size(); ++m )
for ( const auto & f : _evgroup[0]->fills() )
_persistent[m]->fill( f.first, f.second * weight[0][m] );
} else {
// outer index is subevent, inner index is jets in the event
vector<vector<Fill<T>>> linedUpXs
= match_fills<T>(_evgroup, {typename T::FillType(), 0.0});
commit<T>( _persistent, linedUpXs, weight );
}
_evgroup.clear();
_active.reset();
}
template <>
void Wrapper<YODA::Counter>::pushToPersistent(const vector<valarray<double> >& weight) {
for ( size_t m = 0; m < _persistent.size(); ++m ) {
for ( size_t n = 0; n < _evgroup.size(); ++n ) {
for ( const auto & f : _evgroup[n]->fills() ) {
_persistent[m]->fill( f.second * weight[n][m] );
}
}
}
_evgroup.clear();
_active.reset();
}
template <>
void Wrapper<YODA::Scatter1D>::pushToPersistent(const vector<valarray<double> >& weight) {
cout << ("WARNING: filling scatters in the event loop is not a well-defined behavior!!") << endl;
_evgroup.clear();
_active.reset();
}
template <>
void Wrapper<YODA::Scatter2D>::pushToPersistent(const vector<valarray<double> >& weight) {
cout << ("WARNING: filling scatters in the event loop is not a well-defined behavior!!") << endl;
_evgroup.clear();
_active.reset();
}
template <>
void Wrapper<YODA::Scatter3D>::pushToPersistent(const vector<valarray<double> >& weight) {
cout << ("WARNING: filling scatters in the event loop is not a well-defined behavior!!") << endl;
_evgroup.clear();
_active.reset();
}
// explicitly instantiate all wrappers
template class Wrapper<YODA::Histo1D>;
template class Wrapper<YODA::Histo2D>;
template class Wrapper<YODA::Profile1D>;
template class Wrapper<YODA::Profile2D>;
template class Wrapper<YODA::Counter>;
template class Wrapper<YODA::Scatter1D>;
template class Wrapper<YODA::Scatter2D>;
template class Wrapper<YODA::Scatter3D>;
}

File Metadata

Mime Type
text/x-diff
Expires
Thu, Apr 3, 8:01 PM (3 h, 4 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
4722776
Default Alt Text
(829 KB)

Event Timeline